diff --git a/azext/batch/_file_utils.py b/azext/batch/_file_utils.py index b1376276..09fb99eb 100644 --- a/azext/batch/_file_utils.py +++ b/azext/batch/_file_utils.py @@ -320,36 +320,23 @@ def get_container_list(self, source): def resolve_resource_file(self, resource_file): """Convert new resourceFile reference to server-supported reference""" - if resource_file.http_url: - # Support original resourceFile reference - if not resource_file.file_path: - raise ValueError('Malformed ResourceFile: \'httpUrl\' must ' - 'also have \'file_path\' attribute') - return [resource_file] - - if resource_file.storage_container_url or resource_file.auto_storage_container_name: - return [resource_file] - - if not hasattr(resource_file, 'source') or not resource_file.source: - raise ValueError('Malformed ResourceFile: Must have either ' - ' \'source\' or \'httpUrl\'') - storage_client = self.resolve_storage_account() container = None blobs = [] - - if resource_file.source.file_group: - # Input data stored in auto-storage - container = get_container_name(resource_file.source.file_group) - blobs = self.list_container_contents(resource_file.source, container, storage_client) - return convert_blobs_to_resource_files(blobs, resource_file) - if resource_file.source.container_url: - # Input data storage in arbitrary container - uri = urlsplit(resource_file.source.container_url) - container = uri.pathname.split('/')[1] - blobs = self.list_container_contents(resource_file.source, container, storage_client) - return convert_blobs_to_resource_files(blobs, resource_file) - if resource_file.source.url: - # TODO: Input data from an arbitrary HTTP GET source - raise ValueError('Not implemented') - raise ValueError('Malformed ResourceFile') + if hasattr(resource_file, 'source') and resource_file.source: + if resource_file.source.file_group: + # Input data stored in auto-storage + container = get_container_name(resource_file.source.file_group) + blobs = 
self.list_container_contents(resource_file.source, container, storage_client) + return convert_blobs_to_resource_files(blobs, resource_file) + if resource_file.source.container_url: + # Input data storage in arbitrary container + uri = urlsplit(resource_file.source.container_url) + container = uri.path.split('/')[1] + blobs = self.list_container_contents(resource_file.source, container, storage_client) + return convert_blobs_to_resource_files(blobs, resource_file) + if resource_file.source.url: + # TODO: Input data from an arbitrary HTTP GET source + raise ValueError('Not implemented') + + return [resource_file] diff --git a/azext/batch/_template_utils.py b/azext/batch/_template_utils.py index d72774bf..8d9e46c5 100644 --- a/azext/batch/_template_utils.py +++ b/azext/batch/_template_utils.py @@ -9,7 +9,7 @@ import copy import itertools import json -from logging import getLogger +import logging import re from msrest.serialization import Model try: @@ -21,7 +21,7 @@ from . import _pool_utils as pool_utils from . 
import models -logger = getLogger(__name__) +logger = logging.getLogger(__name__) try: _UNICODE_TYPE = unicode except NameError: diff --git a/azext/batch/batch_extensions_client.py b/azext/batch/batch_extensions_client.py index 5a9b9f20..00f5baf4 100644 --- a/azext/batch/batch_extensions_client.py +++ b/azext/batch/batch_extensions_client.py @@ -8,7 +8,7 @@ from six.moves.urllib.parse import urlsplit # pylint: disable=import-error,relative-import from msrest import Serializer, Deserializer -from azure.batch import BatchServiceClient +from ..generated.sdk.batch import BatchServiceClient from azure.mgmt.batch import BatchManagementClient from azure.mgmt.storage import StorageManagementClient from azure.storage.blob import BlockBlobService @@ -76,12 +76,9 @@ def __init__(self, credentials=None, batch_url=None, subscription_id=None, self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) - self.pool = ExtendedPoolOperations( - self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) - self.job = ExtendedJobOperations( - self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) - self.file = ExtendedFileOperations( - self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) + self.pool_extensions = ExtendedPoolOperations(self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) + self.job_extensions = ExtendedJobOperations(self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) + self.file_extensions = ExtendedFileOperations(self, self._client, self.config, self._serialize, self._deserialize, self._storage_account) def _get_cli_profile(self, subscription_id): # pylint:disable=no-self-use try: diff --git a/azext/batch/errors.py b/azext/batch/errors.py index c3bb39ae..99433518 100644 --- a/azext/batch/errors.py +++ b/azext/batch/errors.py @@ -2,7 +2,7 @@ # Copyright 
(c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from azure.batch.models import BatchErrorException +from .models import BatchErrorException class MissingParameterValue(ValueError): diff --git a/azext/batch/models/__init__.py b/azext/batch/models/__init__.py index 9f08fca0..99ff53df 100644 --- a/azext/batch/models/__init__.py +++ b/azext/batch/models/__init__.py @@ -7,8 +7,7 @@ # Not ideal syntax - but saves us having to check and repopulate this # list every time the SDK is regenerated. -from azure.batch.models import * - +from ...generated.sdk.batch.v2019_08_01.models import * try: from ._models_py3 import ExtendedTaskParameter from ._models_py3 import ExtendedJobParameter diff --git a/azext/batch/models/_models.py b/azext/batch/models/_models.py index 5b0bb8e6..177e5833 100644 --- a/azext/batch/models/_models.py +++ b/azext/batch/models/_models.py @@ -898,6 +898,7 @@ class ExtendedResourceFile(models.ResourceFile): 'http_url': {'key': 'httpUrl', 'type': 'str'}, 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'blob_source': {'key': 'blobSource', 'type': 'str'}, 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, 'file_path': {'key': 'filePath', 'type': 'str'}, 'file_mode': {'key': 'fileMode', 'type': 'str'}, diff --git a/azext/batch/models/_models_py3.py b/azext/batch/models/_models_py3.py index e20f22cd..b27e97d3 100644 --- a/azext/batch/models/_models_py3.py +++ b/azext/batch/models/_models_py3.py @@ -5,11 +5,18 @@ # pylint: disable=too-many-lines +import importlib import os -import azure.batch.models as models from msrest.serialization import Model from .constants import ATTRS_RESERVED_FOR_TEMPLATES +models_base = "azext.generated.sdk.batch.v2019_08_01.models.{}" 
+JobAddParameter = getattr(importlib.import_module("azext.generated.sdk.batch.v2019_08_01.models"), "JobAddParameter") +PoolAddParameter = getattr(importlib.import_module("azext.generated.sdk.batch.v2019_08_01.models"), "PoolAddParameter") +TaskAddParameter = getattr(importlib.import_module("azext.generated.sdk.batch.v2019_08_01.models"), "TaskAddParameter") +PoolSpecification = getattr(importlib.import_module("azext.generated.sdk.batch.v2019_08_01.models"), "PoolSpecification") +ResourceFile = getattr(importlib.import_module("azext.generated.sdk.batch.v2019_08_01.models"), "ResourceFile") + class TaskFactoryBase(Model): """A Task Factory for automatically adding a collection of tasks to a job on @@ -326,7 +333,7 @@ def __init__(self, *, id: str, version: str=None, allow_empty_checksums: bool=No self.type = 'chocolateyPackage' -class ExtendedJobParameter(models.JobAddParameter): +class ExtendedJobParameter(Model): """An Azure Batch job to add. :param id: A string that uniquely identifies the job within the account. @@ -430,6 +437,9 @@ class ExtendedJobParameter(models.JobAddParameter): 'on_task_failure', 'task_factory', 'job_preparation_task', 'job_release_task'. :type application_template_info: :class:`ApplicationTemplateInfo ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _validation = { @@ -452,15 +462,17 @@ class ExtendedJobParameter(models.JobAddParameter): 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, 'task_factory': {'key': 'taskFactory', 'type': 'TaskFactoryBase'}, - 'application_template_info': {'key': 'applicationTemplateInfo', 'type': 'ApplicationTemplateInfo'} + 'application_template_info': {'key': 'applicationTemplateInfo', 'type': 'ApplicationTemplateInfo'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, 
metadata=None, uses_task_dependencies: bool=None, task_factory=None, - application_template_info=None, **kwargs) -> None: - super(ExtendedJobParameter, self).__init__( + application_template_info=None, base_model=JobAddParameter, **kwargs) -> None: + base_model.__init__( + self, id=id, display_name=display_name, priority=priority, @@ -499,11 +511,15 @@ class ExtendedOutputFileDestination(Model): combined with container. :type auto_storage: :class:`OutputFileAutoStorageDestination ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _attribute_map = { 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, 'auto_storage': {'key': 'autoStorage', 'type': 'OutputFileAutoStorageDestination'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, *, container=None, auto_storage=None, **kwargs) -> None: @@ -514,7 +530,7 @@ def __init__(self, *, container=None, auto_storage=None, **kwargs) -> None: self.auto_storage = auto_storage -class ExtendedPoolParameter(models.PoolAddParameter): +class ExtendedPoolParameter(Model): """A pool in the Azure Batch service to add. :param id: Required. A string that uniquely identifies the Pool within the @@ -653,6 +669,9 @@ class ExtendedPoolParameter(models.PoolAddParameter): operating system. 
:type package_references: list of :class:`PackageReferenceBase ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _validation = { @@ -683,7 +702,8 @@ class ExtendedPoolParameter(models.PoolAddParameter): 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, - 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'} + 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None, @@ -692,9 +712,10 @@ def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_servi auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, - mount_configuration=None, package_references=None, + mount_configuration=None, package_references=None, base_model=PoolAddParameter, **kwargs) -> None: - super(ExtendedPoolParameter, self).__init__( + base_model.__init__( + self, id=id, display_name=display_name, vm_size=vm_size, @@ -721,7 +742,7 @@ def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_servi self.package_references = package_references -class ExtendedPoolSpecification(models.PoolSpecification): +class ExtendedPoolSpecification(Model): """Specification for creating a new pool. :param display_name: The display name for the pool. The display name need @@ -856,6 +877,9 @@ class ExtendedPoolSpecification(models.PoolSpecification): operating system. 
:type package_references: list of :class:`PackageReferenceBase ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _validation = { @@ -886,7 +910,8 @@ class ExtendedPoolSpecification(models.PoolSpecification): 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, - 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'} + 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, @@ -895,8 +920,10 @@ def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_config enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, - user_accounts=None, metadata=None, package_references=None, **kwargs) -> None: - super(ExtendedPoolSpecification, self).__init__( + user_accounts=None, metadata=None, package_references=None, base_model=PoolSpecification, + **kwargs) -> None: + base_model.__init__( + self, display_name=display_name, vm_size=vm_size, cloud_service_configuration=cloud_service_configuration, @@ -921,7 +948,7 @@ def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_config self.package_references = package_references -class ExtendedResourceFile(models.ResourceFile): +class ExtendedResourceFile(Model): """A file to be downloaded from Azure blob storage to a compute node. :param http_url: The URL of the file within Azure Blob Storage. 
This @@ -969,6 +996,9 @@ class ExtendedResourceFile(models.ResourceFile): a Azure Storage container or an auto-storage file group. :type source: :class:`FileSource ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _attribute_map = { @@ -978,7 +1008,9 @@ class ExtendedResourceFile(models.ResourceFile): 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, 'file_path': {'key': 'filePath', 'type': 'str'}, 'file_mode': {'key': 'fileMode', 'type': 'str'}, - 'source': {'key': 'source', 'type': 'FileSource'} + 'blob_source': {'key': 'blobSource', 'type': 'str'}, + 'source': {'key': 'source', 'type': 'FileSource'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, @@ -989,8 +1021,11 @@ def __init__(self, blob_prefix: str=None, file_path: str=None, file_mode: str=None, - source=None, **kwargs) -> None: - super(ExtendedResourceFile, self).__init__( + source=None, + base_model=ResourceFile, + **kwargs) -> None: + base_model.__init__( + self, http_url=http_url, auto_storage_container_name=auto_storage_container_name, storage_container_url=storage_container_url, @@ -1001,7 +1036,7 @@ def __init__(self, self.source = source -class ExtendedTaskParameter(models.TaskAddParameter): +class ExtendedTaskParameter(Model): """An Azure Batch task to add. :param id: A string that uniquely identifies the task within the job. The @@ -1107,6 +1142,9 @@ class ExtendedTaskParameter(models.TaskAddParameter): operating system. 
:type package_references: list of :class:`PackageReferenceBase ` + :param base_model: A reference to the object this class should extend + :type base_model: :class:`Model + ` """ _validation = { @@ -1132,7 +1170,8 @@ class ExtendedTaskParameter(models.TaskAddParameter): 'type': '[ApplicationPackageReference]'}, 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, - 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'} + 'package_references': {'key': 'packageReferences', 'type': '[PackageReferenceBase]'}, + 'base_model': {'key': 'baseModel', 'type': 'Model'} } def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, @@ -1140,8 +1179,9 @@ def __init__(self, *, id: str, command_line: str, display_name: str=None, contai affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, - package_references=None, **kwargs) -> None: - super(ExtendedTaskParameter, self).__init__( + package_references=None, base_model=TaskAddParameter, **kwargs) -> None: + base_model.__init__( + self, id=id, display_name=display_name, command_line=command_line, diff --git a/azext/batch/models/constants.py b/azext/batch/models/constants.py index 1779fdca..76841a0f 100644 --- a/azext/batch/models/constants.py +++ b/azext/batch/models/constants.py @@ -65,6 +65,21 @@ 'dependsOn'}) -# Dates used as cutoffs for different SDK extension versions -class KnownTemplateVersion(Enum): - Dec2018 = "2018-12-01" +# Ensure the first member of this array is the official REST Version +class SupportedRestApi(Enum): + Aug2018 = ["2018-08-01.7.0", "2018-08-01"] + Dec2018 = ["2018-12-01.8.0", "2018-12-01"] + Jun2019 = ["2019-06-01.9.0", "2019-06-01"] + Aug2019 = ["2019-08-01.10.0", "2019-08-01", "latest"] + + +class SupportedTemplateApi(Enum): + Latest = ["latest"] + + 
+SupportRestApiToSdkVersion = { + SupportedRestApi.Aug2018: "2018_08_01", + SupportedRestApi.Dec2018: "2018_12_01", + SupportedRestApi.Jun2019: "2019_06_01", + SupportedRestApi.Aug2019: "2019_08_01", +} diff --git a/azext/batch/operations/file_operations.py b/azext/batch/operations/file_operations.py index 977dd8c4..cf25fee2 100644 --- a/azext/batch/operations/file_operations.py +++ b/azext/batch/operations/file_operations.py @@ -8,13 +8,12 @@ import errno import os -from azure.batch.operations._file_operations import FileOperations from azure.storage.blob.models import Include from .. import _file_utils as file_utils -class ExtendedFileOperations(FileOperations): +class ExtendedFileOperations: """FileOperations operations. :param parent: The parent BatchExtensionsClient object. @@ -25,7 +24,6 @@ class ExtendedFileOperations(FileOperations): :param get_storage_account: A callable to retrieve a storage client object. """ def __init__(self, parent, client, config, serializer, deserializer, get_storage_account): - super(ExtendedFileOperations, self).__init__(client, config, serializer, deserializer) self._parent = parent self.get_storage_client = get_storage_account diff --git a/azext/batch/operations/job_operations.py b/azext/batch/operations/job_operations.py index 6f0cb132..81362735 100644 --- a/azext/batch/operations/job_operations.py +++ b/azext/batch/operations/job_operations.py @@ -4,17 +4,20 @@ # -------------------------------------------------------------------------------------------- from __future__ import unicode_literals -from datetime import datetime as dt +import importlib +import logging + from msrest.exceptions import DeserializationError -from azure.batch.operations._job_operations import JobOperations -from .. import models -from .. import _template_utils as templates -from .. import _pool_utils as pool_utils +from ..models import JobTemplate, ExtendedJobParameter +from .. import _template_utils +from .. 
import _pool_utils from .._file_utils import FileUtils -from ..models.constants import KnownTemplateVersion +from ..models.constants import SupportedRestApi, SupportRestApiToSdkVersion + +logger = logging.getLogger(__name__) -class ExtendedJobOperations(JobOperations): +class ExtendedJobOperations: """JobOperations operations. :param parent: The parent BatchExtensionsClient object. @@ -25,7 +28,6 @@ class ExtendedJobOperations(JobOperations): :param get_storage_account: A callable to retrieve a storage client object. """ def __init__(self, parent, client, config, serializer, deserializer, get_storage_account): - super(ExtendedJobOperations, self).__init__(client, config, serializer, deserializer) self._parent = parent self.get_storage_client = get_storage_account @@ -53,7 +55,7 @@ def expand_template(template, parameters=None): :param template: The template data. Must be a dictionary. :param parameters: The values of parameters to be substituted into the template. Must be a dictionary. - :returns: The pool specification JSON dictionary. + :returns: The job specification JSON dictionary. 
""" if not isinstance(template, dict): raise ValueError("template isn't a JSON dictionary") @@ -61,7 +63,7 @@ def expand_template(template, parameters=None): raise ValueError("parameters isn't a JSON dictionary") elif not parameters: parameters = {} - expanded_job_object = templates.expand_template(template, parameters) + expanded_job_object = _template_utils.expand_template(template, parameters) try: return expanded_job_object['job'] except KeyError: @@ -73,22 +75,35 @@ def jobparameter_from_json(json_data): :param dict json_data: The JSON specification of an AddJobParameter or an ExtendedJobParameter or a JobTemplate """ + # json_data = templates.convert_blob_source_to_http_url(json_data) + api_version_raw = json_data.get('apiVersion') + if api_version_raw: + api_version = None + for valid_version in SupportedRestApi: + if api_version_raw in valid_version.value: + api_version = valid_version + break + + if api_version and SupportRestApiToSdkVersion[api_version] != "latest": + models_base = "azext.generated.sdk.batch.v{}.models".format( + SupportRestApiToSdkVersion[api_version]) + importlib.import_module(models_base) + return ExtendedJobOperations._jobparameter_from_json(json_data) + else: + logging.warning("Invalid apiVersion, defaulting to latest") + return ExtendedJobOperations._jobparameter_from_json( + json_data) + + @staticmethod + def _jobparameter_from_json(json_data): result = 'JobTemplate' if json_data.get('properties') else 'ExtendedJobParameter' - json_data = templates.convert_blob_source_to_http_url(json_data) try: if result == 'JobTemplate': - if 'apiVersion' in json_data: - max_datetime = dt.strptime(KnownTemplateVersion.Dec2018.value, "%Y-%m-%d") - specified_datetime = dt.strptime(json_data['apiVersion'], "%Y-%m-%d") - if max_datetime < specified_datetime: - raise NotImplementedError( - "This SDK does not have template API version {} implemented".format( - json_data['apiVersion'])) - job = models.JobTemplate.from_dict(json_data) + job = 
JobTemplate.from_dict(json_data) else: - job = models.ExtendedJobParameter.from_dict(json_data) + job = ExtendedJobParameter.from_dict(json_data) if job is None: - raise ValueError("JSON file is not in correct format.") + raise ValueError("JSON data is not in correct format.") return job except NotImplementedError: raise @@ -136,18 +151,98 @@ def add(self, job, job_add_options=None, custom_headers=None, raw=False, :raises: :class:`BatchErrorException` """ - if isinstance(job, models.JobTemplate): - if job.api_version: - max_datetime = dt.strptime(KnownTemplateVersion.Dec2018.value, "%Y-%m-%d") - specified_datetime = dt.strptime(job.api_version, "%Y-%m-%d") - if max_datetime < specified_datetime: - raise NotImplementedError("This SDK does not have template API version {} implemetned".format( - job.api_version)) + original_api_version = None + api_version = None + api_version_raw = getattr(job, 'api_version', None) + if api_version_raw: + for valid_version in SupportedRestApi: + if api_version_raw in valid_version.value: + api_version = valid_version + break + + if api_version and SupportRestApiToSdkVersion[api_version] != "latest": + if isinstance(job, JobTemplate): + job = job.properties + else: + logger.warning("Invalid apiVersion, defaulting to latest") + api_version = None + + if isinstance(job, JobTemplate): job = job.properties + + try: + if api_version: + original_api_version = self._parent.api_version + self._parent.api_version = api_version.value[0] + ret = self._add( + job, + job_add_options, + custom_headers, + raw, + threads, + api_version.value[0], + **operation_config) + self._parent.api_version = original_api_version + return ret + return self._add( + job, + job_add_options, + custom_headers, + raw, + threads, + api_version=None, **operation_config) + except Exception as e: # pylint: disable=broad-except + if original_api_version: + self._parent.api_version = original_api_version + self._parent.task.api_version = original_api_version + raise e + add.metadata = {'url': 
'/jobs'} + + def _add(self, job, job_add_options, custom_headers, raw, + threads, api_version, **operation_config): + """Adds a job to the specified account. + + The Batch service supports two ways to control the work done as part of + a job. In the first approach, the user specifies a Job Manager task. + The Batch service launches this task when it is ready to start the job. + The Job Manager task controls all other tasks that run under this job, + by using the Task APIs. In the second approach, the user directly + controls the execution of tasks under an active job, by using the Task + APIs. Also note: when naming jobs, avoid including sensitive + information such as user names or secret project names. This + information may appear in telemetry logs accessible to Microsoft + Support engineers. + + :param job: The job to be added. + :type job: :class:`JobAddParameter` or + :class:`ExtendedJobParameter` + or :class:`JobTemplate` + :param job_add_options: Additional parameters for the operation + :type job_add_options: :class:`JobAddOptions + ` + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param int threads: number of threads to use in parallel when adding tasks. + If specified will start additional threads to submit requests and + wait for them to finish. Defaults to half of cpu count(floor) + :param operation_config: :ref:`Operation configuration + overrides`. + :return: :class:`TaskAddCollectionResult + ` if using TaskFactory or + :class:`ClientRawResponse` if + raw=true, otherwise None + :rtype: None or :class:`TaskAddCollectionResult + ` or + :class:`ClientRawResponse` + :raises: + :class:`BatchErrorException` + """ + models = self._parent.models(api_version) # Process an application template reference. 
if hasattr(job, 'application_template_info') and job.application_template_info: try: - templates.expand_application_template(job, self._deserialize) + _template_utils.expand_application_template(job, self._deserialize) except DeserializationError as error: raise ValueError("Failed to load application template from '{}': {}". format(job.application_template_info.file_path, error)) @@ -157,9 +252,9 @@ def add(self, job, job_add_options=None, custom_headers=None, raw=False, task_collection = [] file_utils = FileUtils(self.get_storage_client) if hasattr(job, 'task_factory') and job.task_factory: - if templates.has_merge_task(job): + if _template_utils.has_merge_task(job): job.uses_task_dependencies = True - task_collection = templates.expand_task_factory(job, file_utils) + task_collection = _template_utils.expand_task_factory(job, file_utils) # If job has a task factory and terminate job on all tasks complete is set, the job will # already be terminated when we add the tasks, so we need to set to noAction, then patch @@ -168,11 +263,11 @@ def add(self, job, job_add_options=None, custom_headers=None, raw=False, auto_complete = job.on_all_tasks_complete job.on_all_tasks_complete = 'noaction' - should_get_pool = templates.should_get_pool(job, task_collection) + should_get_pool = _template_utils.should_get_pool(job, task_collection) pool_os_flavor = None if should_get_pool: pool = self._get_target_pool(job) - pool_os_flavor = pool_utils.get_pool_target_os_type(pool) + pool_os_flavor = _pool_utils.get_pool_target_os_type(pool) # Handle package management on autopool if job.pool_info.auto_pool_specification \ @@ -180,27 +275,27 @@ def add(self, job, job_add_options=None, custom_headers=None, raw=False, and job.pool_info.auto_pool_specification.pool.package_references: pool = job.pool_info.auto_pool_specification.pool - cmds = [templates.process_pool_package_references(pool)] + cmds = [_template_utils.process_pool_package_references(pool)] pool.start_task = models.StartTask( 
- **templates.construct_setup_task(pool.start_task, cmds, pool_os_flavor)) + **_template_utils.construct_setup_task(pool.start_task, cmds, pool_os_flavor)) commands = [] # Handle package management on tasks. - commands.append(templates.process_task_package_references( + commands.append(_template_utils.process_task_package_references( task_collection, pool_os_flavor)) - job_prep_task_parameters = templates.construct_setup_task( + job_prep_task_parameters = _template_utils.construct_setup_task( job.job_preparation_task, commands, pool_os_flavor) if job_prep_task_parameters: job.job_preparation_task = models.JobPreparationTask(**job_prep_task_parameters) # Handle any extended resource file references. - templates.post_processing(job, file_utils, pool_os_flavor) + _template_utils.post_processing(job, file_utils, pool_os_flavor) if task_collection: - templates.post_processing(task_collection, file_utils, pool_os_flavor) - templates.process_job_for_output_files(job, task_collection, file_utils) + _template_utils.post_processing(task_collection, file_utils, pool_os_flavor) + _template_utils.process_job_for_output_files(job, task_collection, file_utils) # Begin original job add process - result = super(ExtendedJobOperations, self).add( + result = self._parent.job.add( job, job_add_options, custom_headers, raw, **operation_config) if task_collection: try: @@ -210,15 +305,14 @@ def add(self, job, job_add_options=None, custom_headers=None, raw=False, None, None, raw, - threads) - except Exception: + threads=threads) + except Exception as e: # If task submission raises, we roll back the job - self.delete(job.id) - raise + self._parent.job.delete(job.id) + raise e if auto_complete: # If the option to terminate the job was set, we need to reapply it with a patch # now that the tasks have been added. 
- self.patch(job.id, {'on_all_tasks_complete': auto_complete}) + self._parent.job.patch(job.id, {'on_all_tasks_complete': auto_complete}) return tasks return result - add.metadata = {'url': '/jobs'} diff --git a/azext/batch/operations/pool_operations.py b/azext/batch/operations/pool_operations.py index 1deb4d73..f68f32be 100644 --- a/azext/batch/operations/pool_operations.py +++ b/azext/batch/operations/pool_operations.py @@ -3,16 +3,20 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from datetime import datetime as dt +import importlib +import logging + from azure.batch.operations._pool_operations import PoolOperations -from .. import models -from .. import _file_utils as file_utils -from .. import _pool_utils as pool_utils -from .. import _template_utils as templates -from ..models.constants import KnownTemplateVersion +from ..models import PoolTemplate, ExtendedPoolParameter +from ..models.constants import SupportedRestApi, SupportRestApiToSdkVersion +from .. import _file_utils +from .. import _pool_utils +from .. import _template_utils + +logger = logging.getLogger(__name__) -class ExtendedPoolOperations(PoolOperations): +class ExtendedPoolOperations: """PoolOperations operations. :param parent: The parent BatchExtensionsClient object. @@ -23,7 +27,6 @@ class ExtendedPoolOperations(PoolOperations): :param get_storage_account: A callable to retrieve a storage client object. 
""" def __init__(self, parent, client, config, serializer, deserializer, get_storage_account): - super(ExtendedPoolOperations, self).__init__(client, config, serializer, deserializer) self._parent = parent self.get_storage_client = get_storage_account @@ -41,11 +44,13 @@ def expand_template(template, parameters=None): raise ValueError("parameters isn't a JSON dictionary") elif not parameters: parameters = {} - expanded_pool_object = templates.expand_template(template, parameters) + expanded_pool_object = _template_utils.expand_template(template, parameters) try: + # If JobParameter only return content return expanded_pool_object['pool'] except KeyError: - raise ValueError("Template missing required 'pool' element") + # Else return full template + return expanded_pool_object @staticmethod def poolparameter_from_json(json_data): @@ -53,20 +58,37 @@ def poolparameter_from_json(json_data): :param dict json_data: The JSON specification of an AddPoolParameter or an ExtendedPoolParameter or a PoolTemplate. """ + api_version_raw = json_data.get('apiVersion') + if api_version_raw: + api_version = None + for valid_version in SupportedRestApi: + if api_version_raw in valid_version.value: + api_version = valid_version + break + + if api_version and SupportRestApiToSdkVersion[api_version] != "latest": + models_base = "azext.generated.sdk.batch.v{}.models".format( + SupportRestApiToSdkVersion[api_version]) + importlib.import_module(models_base) + return ExtendedPoolOperations._poolparameter_from_json(json_data) + else: + logging.warning("Invalid apiVersion, defaulting to latest") + return ExtendedPoolOperations._poolparameter_from_json(json_data) + + + @staticmethod + def _poolparameter_from_json(json_data): + """Create an ExtendedPoolParameter object from a JSON specification. + :param dict json_data: The JSON specification of an AddPoolParameter or an + ExtendedPoolParameter or a PoolTemplate. 
+ :param module models: models to deserialize from + """ result = 'PoolTemplate' if json_data.get('properties') else 'ExtendedPoolParameter' - json_data = templates.convert_blob_source_to_http_url(json_data) try: if result == 'PoolTemplate': - if 'apiVersion' in json_data: - max_datetime = dt.strptime(KnownTemplateVersion.Dec2018.value, "%Y-%m-%d") - specified_datetime = dt.strptime(json_data['apiVersion'], "%Y-%m-%d") - if max_datetime < specified_datetime: - raise NotImplementedError( - "This SDK does not have template API version {} implemented".format( - json_data['apiVersion'])) - pool = models.PoolTemplate.from_dict(json_data) + pool = PoolTemplate.from_dict(json_data) else: - pool = models.ExtendedPoolParameter.from_dict(json_data) + pool = ExtendedPoolParameter.from_dict(json_data) if pool is None: raise ValueError("JSON data is not in correct format.") return pool @@ -75,8 +97,7 @@ def poolparameter_from_json(json_data): except Exception as exp: raise ValueError("Unable to deserialize to {}: {}".format(result, exp)) - def add( - self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): + def add(self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): """Adds a pool to the specified account. 
When naming pools, avoid including sensitive information such as user @@ -103,27 +124,90 @@ def add( :raises: :class:`BatchErrorException` """ - if isinstance(pool, models.PoolTemplate): - if pool.api_version: - max_datetime = dt.strptime(KnownTemplateVersion.Dec2018.value, "%Y-%m-%d") - specified_datetime = dt.strptime(pool.api_version, "%Y-%m-%d") - if max_datetime < specified_datetime: - raise NotImplementedError("This SDK does not have template API version {} implemetned".format( - pool.api_version)) + original_api_version = None + api_version = None + vendored_models = None + api_version_raw = getattr(pool, 'api_version', None) + if api_version_raw: + for valid_version in SupportedRestApi: + if api_version_raw in valid_version.value: + api_version = valid_version + break + if not api_version: + logging.warning("Invalid apiVersion, defaulting to latest") + + if isinstance(pool, PoolTemplate): pool = pool.properties + try: + if api_version: + original_api_version = self._parent.api_version + self._parent.api_version = api_version.value[0] + ret = self._add( + pool, + pool_add_options, + custom_headers, + raw, + api_version.value[0], + **operation_config) + self._parent.api_version = original_api_version + return ret + else: + return self._add( + pool, + pool_add_options, + custom_headers, + raw, + **operation_config) + except Exception: # pylint: disable=broad-except + if original_api_version: + self._parent.api_version = original_api_version + raise + add.metadata = {'url': '/pools'} + + def _add(self, + pool, + pool_add_options, + custom_headers, + raw, + api_version=None, + **operation_config): + """ Internal add method for pool - pool_os_flavor = pool_utils.get_pool_target_os_type(pool) + :param pool: The pool to be added. 
+ :type pool: :class:`PoolAddParameter` or + :class:`ExtendedPoolParameter` + or :class:`PoolTemplate` + :param pool_add_options: Additional parameters for the operation + :type pool_add_options: :class:`PoolAddOptions + ` + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :param pool_utils: pool utility methods + :param template_utils: template utility methods + :param file_utils: file utility methods + :return: None or + :class:`ClientRawResponse` if + raw=true + :rtype: None or + :class:`ClientRawResponse` + :raises: + :class:`BatchErrorException` + """ + models = self._parent.models(api_version) + pool_os_flavor = _pool_utils.get_pool_target_os_type(pool) # Handle package manangement if hasattr(pool, 'package_references') and pool.package_references: - cmds = [templates.process_pool_package_references(pool)] + cmds = [_template_utils.process_pool_package_references(pool)] # Update the start task command - pool.start_task = models.StartTask(**templates.construct_setup_task( + pool.start_task = models.StartTask(**_template_utils.construct_setup_task( pool.start_task, cmds, pool_os_flavor)) # Handle any extended resource file references. 
- fileutils = file_utils.FileUtils(self.get_storage_client) - templates.post_processing(pool, fileutils, pool_os_flavor) + fileutils = _file_utils.FileUtils(self.get_storage_client) + _template_utils.post_processing(pool, fileutils, pool_os_flavor) - return super(ExtendedPoolOperations, self).add(pool, pool_add_options, custom_headers, raw, **operation_config) - add.metadata = {'url': '/pools'} + return self._parent.pool.add(pool, pool_add_options, custom_headers, raw, **operation_config) diff --git a/azext/generated/sdk/batch/__init__.py b/azext/generated/sdk/batch/__init__.py new file mode 100644 index 00000000..887e58fd --- /dev/null +++ b/azext/generated/sdk/batch/__init__.py @@ -0,0 +1,3 @@ +from ._batch_service_client import BatchServiceClient, BatchServiceClientConfiguration + +__all__ = ['BatchServiceClient', 'BatchServiceClientConfiguration'] diff --git a/azext/generated/sdk/batch/_batch_service_client.py b/azext/generated/sdk/batch/_batch_service_client.py new file mode 100644 index 00000000..d7307dc5 --- /dev/null +++ b/azext/generated/sdk/batch/_batch_service_client.py @@ -0,0 +1,264 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer +from msrestazure import AzureConfiguration + +from azure.profiles import KnownProfiles, ProfileDefinition +from azure.profiles.multiapiclient import MultiApiClientMixin + +class BatchServiceClientConfiguration(AzureConfiguration): + """Configuration for BatchServiceClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. + :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' + + super(BatchServiceClientConfiguration, self).__init__(base_url) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.batch_url = batch_url + +class BatchServiceClient(MultiApiClientMixin, SDKClient): + """Batch Service Client + + This client contains multiple API versions, to help you deal with all Azure clouds + (Azure Stack, Azure Government, Azure China, etc.). + By default, uses latest API version available on public Azure. + For production, you should stick to a particular api-version and/or profile. + The profile sets a mapping between the operation group and an API version. + The api-version parameter sets the default API version if the operation + group is not described in the profile. + + :ivar config: Configuration for client. + :vartype config: BatchServiceClientConfiguration + + :param credentials: Credentials needed for the client to connect to Azure. 
+ :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. + The Batch account URL forms part of the URI + for every service call. + :type batch_url: str + :param str api_version: API version to use if no profile is provided, or if + missing in profile. + :param str base_url: Service URL + :param profile: A profile definition, from KnownProfiles to dict. + :type profile: azure.profiles.KnownProfiles + """ + + DEFAULT_API_VERSION = '2019-08-01.10.0' + _PROFILE_TAG = "azure.batch.ComputeManagementClient" + LATEST_PROFILE = ProfileDefinition({ + _PROFILE_TAG: { + None: DEFAULT_API_VERSION, + }}, + _PROFILE_TAG + " latest" + ) + + def __init__( + self, credentials, batch_url, api_version=None, profile=KnownProfiles.default): + + self.config = BatchServiceClientConfiguration(credentials, batch_url) + super(BatchServiceClient, self).__init__(self.config.credentials, self.config) + self.api_version = '2019-08-01.10.0' + + @classmethod + def _models_dict(cls, api_version): + return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} + + @classmethod + def models(cls, api_version=None): + """Module depends on the API version: + """ + api_version = api_version if api_version else cls.DEFAULT_API_VERSION + if api_version == '2018-08-01.7.0': + from .v2018_08_01 import models + return models + elif api_version == '2018-12-01.8.0': + from .v2018_12_01 import models + return models + elif api_version == '2019-06-01.9.0': + from .v2019_06_01 import models + return models + elif api_version == '2019-08-01.10.0': + from .v2019_08_01 import models + return models + raise NotImplementedError("APIVersion {} is not available".format(api_version)) + + @property + def application(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import ApplicationOperations + elif 
self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import ApplicationOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import ApplicationOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import ApplicationOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return ApplicationOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def pool(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import PoolOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import PoolOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import PoolOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import PoolOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return PoolOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def account(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import AccountOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import AccountOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import AccountOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import AccountOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return AccountOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), 
Deserializer(self._models_dict(self.api_version))) + + @property + def job(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import JobOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import JobOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import JobOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import JobOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return JobOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def certificate(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import CertificateOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import CertificateOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import CertificateOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import CertificateOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return CertificateOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def file(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import FileOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import FileOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import FileOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import FileOperations + 
else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return FileOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def job_schedule(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import JobScheduleOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import JobScheduleOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import JobScheduleOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import JobScheduleOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return JobScheduleOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def task(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import TaskOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations import TaskOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import TaskOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import TaskOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return TaskOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) + + @property + def compute_node(self): + """Instance depends on the API version + """ + if self.api_version == '2018-08-01.7.0': + from .v2018_08_01.operations import ComputeNodeOperations + elif self.api_version == '2018-12-01.8.0': + from .v2018_12_01.operations 
import ComputeNodeOperations + elif self.api_version == '2019-06-01.9.0': + from .v2019_06_01.operations import ComputeNodeOperations + elif self.api_version == '2019-08-01.10.0': + from .v2019_08_01.operations import ComputeNodeOperations + else: + raise NotImplementedError("APIVersion {} is not available".format(self.api_version)) + return ComputeNodeOperations( + self._client, self.config, Serializer(self._models_dict(self.api_version)), Deserializer(self._models_dict(self.api_version))) diff --git a/azext/generated/sdk/batch/v2018_08_01/__init__.py b/azext/generated/sdk/batch/v2018_08_01/__init__.py new file mode 100644 index 00000000..f27e0cb6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .batch_service_client import BatchServiceClient +from .version import VERSION + +__all__ = ['BatchServiceClient'] + +__version__ = VERSION + diff --git a/azext/generated/sdk/batch/v2018_08_01/batch_service_client.py b/azext/generated/sdk/batch/v2018_08_01/batch_service_client.py new file mode 100644 index 00000000..30b6b575 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/batch_service_client.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer +from msrestazure import AzureConfiguration +from .version import VERSION +from .operations.application_operations import ApplicationOperations +from .operations.pool_operations import PoolOperations +from .operations.account_operations import AccountOperations +from .operations.job_operations import JobOperations +from .operations.certificate_operations import CertificateOperations +from .operations.file_operations import FileOperations +from .operations.job_schedule_operations import JobScheduleOperations +from .operations.task_operations import TaskOperations +from .operations.compute_node_operations import ComputeNodeOperations +from . import models + + +class BatchServiceClientConfiguration(AzureConfiguration): + """Configuration for BatchServiceClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. + :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' + + super(BatchServiceClientConfiguration, self).__init__(base_url) + + self.add_user_agent('azure-batch/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.batch_url = batch_url + + +class BatchServiceClient(SDKClient): + """A client for issuing REST requests to the Azure Batch service. 
+ + :ivar config: Configuration for client. + :vartype config: BatchServiceClientConfiguration + + :ivar application: Application operations + :vartype application: azure.batch.operations.ApplicationOperations + :ivar pool: Pool operations + :vartype pool: azure.batch.operations.PoolOperations + :ivar account: Account operations + :vartype account: azure.batch.operations.AccountOperations + :ivar job: Job operations + :vartype job: azure.batch.operations.JobOperations + :ivar certificate: Certificate operations + :vartype certificate: azure.batch.operations.CertificateOperations + :ivar file: File operations + :vartype file: azure.batch.operations.FileOperations + :ivar job_schedule: JobSchedule operations + :vartype job_schedule: azure.batch.operations.JobScheduleOperations + :ivar task: Task operations + :vartype task: azure.batch.operations.TaskOperations + :ivar compute_node: ComputeNode operations + :vartype compute_node: azure.batch.operations.ComputeNodeOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + self.config = BatchServiceClientConfiguration(credentials, batch_url) + super(BatchServiceClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2018-08-01.7.0' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.application = ApplicationOperations( + self._client, self.config, self._serialize, self._deserialize) + self.pool = PoolOperations( + self._client, self.config, self._serialize, self._deserialize) + self.account = AccountOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job = JobOperations( + self._client, self.config, self._serialize, self._deserialize) + self.certificate = CertificateOperations( + self._client, self.config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job_schedule = JobScheduleOperations( + self._client, self.config, self._serialize, self._deserialize) + self.task = TaskOperations( + self._client, self.config, self._serialize, self._deserialize) + self.compute_node = ComputeNodeOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/__init__.py b/azext/generated/sdk/batch/v2018_08_01/models/__init__.py new file mode 100644 index 00000000..5ffc99a7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/__init__.py @@ -0,0 +1,721 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from .pool_usage_metrics_py3 import PoolUsageMetrics + from .image_reference_py3 import ImageReference + from .node_agent_sku_py3 import NodeAgentSku + from .authentication_token_settings_py3 import AuthenticationTokenSettings + from .usage_statistics_py3 import UsageStatistics + from .resource_statistics_py3 import ResourceStatistics + from .pool_statistics_py3 import PoolStatistics + from .job_statistics_py3 import JobStatistics + from .name_value_pair_py3 import NameValuePair + from .delete_certificate_error_py3 import DeleteCertificateError + from .certificate_py3 import Certificate + from .application_package_reference_py3 import ApplicationPackageReference + from .application_summary_py3 import ApplicationSummary + from .certificate_add_parameter_py3 import CertificateAddParameter + from .file_properties_py3 import FileProperties + from .node_file_py3 import NodeFile + from .schedule_py3 import Schedule + from .job_constraints_py3 import JobConstraints + from .container_registry_py3 import ContainerRegistry + from .task_container_settings_py3 import TaskContainerSettings + from .resource_file_py3 import ResourceFile + from .environment_setting_py3 import EnvironmentSetting + from .exit_options_py3 import ExitOptions + from .exit_code_mapping_py3 import ExitCodeMapping + from .exit_code_range_mapping_py3 import ExitCodeRangeMapping + from .exit_conditions_py3 import ExitConditions + from .auto_user_specification_py3 import AutoUserSpecification + from .user_identity_py3 import UserIdentity + from .linux_user_configuration_py3 import LinuxUserConfiguration + from .user_account_py3 import UserAccount + from .task_constraints_py3 import TaskConstraints + from .output_file_blob_container_destination_py3 import OutputFileBlobContainerDestination + from .output_file_destination_py3 import 
OutputFileDestination + from .output_file_upload_options_py3 import OutputFileUploadOptions + from .output_file_py3 import OutputFile + from .job_manager_task_py3 import JobManagerTask + from .job_preparation_task_py3 import JobPreparationTask + from .job_release_task_py3 import JobReleaseTask + from .task_scheduling_policy_py3 import TaskSchedulingPolicy + from .start_task_py3 import StartTask + from .certificate_reference_py3 import CertificateReference + from .metadata_item_py3 import MetadataItem + from .cloud_service_configuration_py3 import CloudServiceConfiguration + from .os_disk_py3 import OSDisk + from .windows_configuration_py3 import WindowsConfiguration + from .data_disk_py3 import DataDisk + from .container_configuration_py3 import ContainerConfiguration + from .virtual_machine_configuration_py3 import VirtualMachineConfiguration + from .network_security_group_rule_py3 import NetworkSecurityGroupRule + from .inbound_nat_pool_py3 import InboundNATPool + from .pool_endpoint_configuration_py3 import PoolEndpointConfiguration + from .network_configuration_py3 import NetworkConfiguration + from .pool_specification_py3 import PoolSpecification + from .auto_pool_specification_py3 import AutoPoolSpecification + from .pool_information_py3 import PoolInformation + from .job_specification_py3 import JobSpecification + from .recent_job_py3 import RecentJob + from .job_schedule_execution_information_py3 import JobScheduleExecutionInformation + from .job_schedule_statistics_py3 import JobScheduleStatistics + from .cloud_job_schedule_py3 import CloudJobSchedule + from .job_schedule_add_parameter_py3 import JobScheduleAddParameter + from .job_scheduling_error_py3 import JobSchedulingError + from .job_execution_information_py3 import JobExecutionInformation + from .cloud_job_py3 import CloudJob + from .job_add_parameter_py3 import JobAddParameter + from .task_container_execution_information_py3 import TaskContainerExecutionInformation + from 
.task_failure_information_py3 import TaskFailureInformation + from .job_preparation_task_execution_information_py3 import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information_py3 import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information_py3 import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts_py3 import TaskCounts + from .auto_scale_run_error_py3 import AutoScaleRunError + from .auto_scale_run_py3 import AutoScaleRun + from .resize_error_py3 import ResizeError + from .cloud_pool_py3 import CloudPool + from .pool_add_parameter_py3 import PoolAddParameter + from .affinity_information_py3 import AffinityInformation + from .task_execution_information_py3 import TaskExecutionInformation + from .compute_node_information_py3 import ComputeNodeInformation + from .node_agent_information_py3 import NodeAgentInformation + from .multi_instance_settings_py3 import MultiInstanceSettings + from .task_statistics_py3 import TaskStatistics + from .task_id_range_py3 import TaskIdRange + from .task_dependencies_py3 import TaskDependencies + from .cloud_task_py3 import CloudTask + from .task_add_parameter_py3 import TaskAddParameter + from .task_add_collection_parameter_py3 import TaskAddCollectionParameter + from .error_message_py3 import ErrorMessage + from .batch_error_detail_py3 import BatchErrorDetail + from .batch_error_py3 import BatchError, BatchErrorException + from .task_add_result_py3 import TaskAddResult + from .task_add_collection_result_py3 import TaskAddCollectionResult + from .subtask_information_py3 import SubtaskInformation + from .cloud_task_list_subtasks_result_py3 import CloudTaskListSubtasksResult + from .task_information_py3 import TaskInformation + from .start_task_information_py3 import StartTaskInformation + from .compute_node_error_py3 import ComputeNodeError + from .inbound_endpoint_py3 import InboundEndpoint + from .compute_node_endpoint_configuration_py3 
import ComputeNodeEndpointConfiguration + from .compute_node_py3 import ComputeNode + from .compute_node_user_py3 import ComputeNodeUser + from .compute_node_get_remote_login_settings_result_py3 import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter_py3 import JobSchedulePatchParameter + from .job_schedule_update_parameter_py3 import JobScheduleUpdateParameter + from .job_disable_parameter_py3 import JobDisableParameter + from .job_terminate_parameter_py3 import JobTerminateParameter + from .job_patch_parameter_py3 import JobPatchParameter + from .job_update_parameter_py3 import JobUpdateParameter + from .pool_enable_auto_scale_parameter_py3 import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter_py3 import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter_py3 import PoolResizeParameter + from .pool_update_properties_parameter_py3 import PoolUpdatePropertiesParameter + from .pool_upgrade_os_parameter_py3 import PoolUpgradeOSParameter + from .pool_patch_parameter_py3 import PoolPatchParameter + from .task_update_parameter_py3 import TaskUpdateParameter + from .node_update_user_parameter_py3 import NodeUpdateUserParameter + from .node_reboot_parameter_py3 import NodeRebootParameter + from .node_reimage_parameter_py3 import NodeReimageParameter + from .node_disable_scheduling_parameter_py3 import NodeDisableSchedulingParameter + from .node_remove_parameter_py3 import NodeRemoveParameter + from .upload_batch_service_logs_configuration_py3 import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result_py3 import UploadBatchServiceLogsResult + from .node_counts_py3 import NodeCounts + from .pool_node_counts_py3 import PoolNodeCounts + from .application_list_options_py3 import ApplicationListOptions + from .application_get_options_py3 import ApplicationGetOptions + from .pool_list_usage_metrics_options_py3 import PoolListUsageMetricsOptions + from 
.pool_get_all_lifetime_statistics_options_py3 import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options_py3 import PoolAddOptions + from .pool_list_options_py3 import PoolListOptions + from .pool_delete_options_py3 import PoolDeleteOptions + from .pool_exists_options_py3 import PoolExistsOptions + from .pool_get_options_py3 import PoolGetOptions + from .pool_patch_options_py3 import PoolPatchOptions + from .pool_disable_auto_scale_options_py3 import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options_py3 import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options_py3 import PoolEvaluateAutoScaleOptions + from .pool_resize_options_py3 import PoolResizeOptions + from .pool_stop_resize_options_py3 import PoolStopResizeOptions + from .pool_update_properties_options_py3 import PoolUpdatePropertiesOptions + from .pool_upgrade_os_options_py3 import PoolUpgradeOsOptions + from .pool_remove_nodes_options_py3 import PoolRemoveNodesOptions + from .account_list_node_agent_skus_options_py3 import AccountListNodeAgentSkusOptions + from .account_list_pool_node_counts_options_py3 import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options_py3 import JobGetAllLifetimeStatisticsOptions + from .job_delete_options_py3 import JobDeleteOptions + from .job_get_options_py3 import JobGetOptions + from .job_patch_options_py3 import JobPatchOptions + from .job_update_options_py3 import JobUpdateOptions + from .job_disable_options_py3 import JobDisableOptions + from .job_enable_options_py3 import JobEnableOptions + from .job_terminate_options_py3 import JobTerminateOptions + from .job_add_options_py3 import JobAddOptions + from .job_list_options_py3 import JobListOptions + from .job_list_from_job_schedule_options_py3 import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options_py3 import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options_py3 import 
JobGetTaskCountsOptions + from .certificate_add_options_py3 import CertificateAddOptions + from .certificate_list_options_py3 import CertificateListOptions + from .certificate_cancel_deletion_options_py3 import CertificateCancelDeletionOptions + from .certificate_delete_options_py3 import CertificateDeleteOptions + from .certificate_get_options_py3 import CertificateGetOptions + from .file_delete_from_task_options_py3 import FileDeleteFromTaskOptions + from .file_get_from_task_options_py3 import FileGetFromTaskOptions + from .file_get_properties_from_task_options_py3 import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options_py3 import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options_py3 import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options_py3 import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options_py3 import FileListFromTaskOptions + from .file_list_from_compute_node_options_py3 import FileListFromComputeNodeOptions + from .job_schedule_exists_options_py3 import JobScheduleExistsOptions + from .job_schedule_delete_options_py3 import JobScheduleDeleteOptions + from .job_schedule_get_options_py3 import JobScheduleGetOptions + from .job_schedule_patch_options_py3 import JobSchedulePatchOptions + from .job_schedule_update_options_py3 import JobScheduleUpdateOptions + from .job_schedule_disable_options_py3 import JobScheduleDisableOptions + from .job_schedule_enable_options_py3 import JobScheduleEnableOptions + from .job_schedule_terminate_options_py3 import JobScheduleTerminateOptions + from .job_schedule_add_options_py3 import JobScheduleAddOptions + from .job_schedule_list_options_py3 import JobScheduleListOptions + from .task_add_options_py3 import TaskAddOptions + from .task_list_options_py3 import TaskListOptions + from .task_add_collection_options_py3 import TaskAddCollectionOptions + from .task_delete_options_py3 import TaskDeleteOptions + from 
.task_get_options_py3 import TaskGetOptions + from .task_update_options_py3 import TaskUpdateOptions + from .task_list_subtasks_options_py3 import TaskListSubtasksOptions + from .task_terminate_options_py3 import TaskTerminateOptions + from .task_reactivate_options_py3 import TaskReactivateOptions + from .compute_node_add_user_options_py3 import ComputeNodeAddUserOptions + from .compute_node_delete_user_options_py3 import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options_py3 import ComputeNodeUpdateUserOptions + from .compute_node_get_options_py3 import ComputeNodeGetOptions + from .compute_node_reboot_options_py3 import ComputeNodeRebootOptions + from .compute_node_reimage_options_py3 import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options_py3 import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options_py3 import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options_py3 import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options_py3 import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options_py3 import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options_py3 import ComputeNodeListOptions +except (SyntaxError, ImportError): + from .pool_usage_metrics import PoolUsageMetrics + from .image_reference import ImageReference + from .node_agent_sku import NodeAgentSku + from .authentication_token_settings import AuthenticationTokenSettings + from .usage_statistics import UsageStatistics + from .resource_statistics import ResourceStatistics + from .pool_statistics import PoolStatistics + from .job_statistics import JobStatistics + from .name_value_pair import NameValuePair + from .delete_certificate_error import DeleteCertificateError + from .certificate import Certificate + from .application_package_reference import ApplicationPackageReference + from 
.application_summary import ApplicationSummary + from .certificate_add_parameter import CertificateAddParameter + from .file_properties import FileProperties + from .node_file import NodeFile + from .schedule import Schedule + from .job_constraints import JobConstraints + from .container_registry import ContainerRegistry + from .task_container_settings import TaskContainerSettings + from .resource_file import ResourceFile + from .environment_setting import EnvironmentSetting + from .exit_options import ExitOptions + from .exit_code_mapping import ExitCodeMapping + from .exit_code_range_mapping import ExitCodeRangeMapping + from .exit_conditions import ExitConditions + from .auto_user_specification import AutoUserSpecification + from .user_identity import UserIdentity + from .linux_user_configuration import LinuxUserConfiguration + from .user_account import UserAccount + from .task_constraints import TaskConstraints + from .output_file_blob_container_destination import OutputFileBlobContainerDestination + from .output_file_destination import OutputFileDestination + from .output_file_upload_options import OutputFileUploadOptions + from .output_file import OutputFile + from .job_manager_task import JobManagerTask + from .job_preparation_task import JobPreparationTask + from .job_release_task import JobReleaseTask + from .task_scheduling_policy import TaskSchedulingPolicy + from .start_task import StartTask + from .certificate_reference import CertificateReference + from .metadata_item import MetadataItem + from .cloud_service_configuration import CloudServiceConfiguration + from .os_disk import OSDisk + from .windows_configuration import WindowsConfiguration + from .data_disk import DataDisk + from .container_configuration import ContainerConfiguration + from .virtual_machine_configuration import VirtualMachineConfiguration + from .network_security_group_rule import NetworkSecurityGroupRule + from .inbound_nat_pool import InboundNATPool + from 
.pool_endpoint_configuration import PoolEndpointConfiguration + from .network_configuration import NetworkConfiguration + from .pool_specification import PoolSpecification + from .auto_pool_specification import AutoPoolSpecification + from .pool_information import PoolInformation + from .job_specification import JobSpecification + from .recent_job import RecentJob + from .job_schedule_execution_information import JobScheduleExecutionInformation + from .job_schedule_statistics import JobScheduleStatistics + from .cloud_job_schedule import CloudJobSchedule + from .job_schedule_add_parameter import JobScheduleAddParameter + from .job_scheduling_error import JobSchedulingError + from .job_execution_information import JobExecutionInformation + from .cloud_job import CloudJob + from .job_add_parameter import JobAddParameter + from .task_container_execution_information import TaskContainerExecutionInformation + from .task_failure_information import TaskFailureInformation + from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts import TaskCounts + from .auto_scale_run_error import AutoScaleRunError + from .auto_scale_run import AutoScaleRun + from .resize_error import ResizeError + from .cloud_pool import CloudPool + from .pool_add_parameter import PoolAddParameter + from .affinity_information import AffinityInformation + from .task_execution_information import TaskExecutionInformation + from .compute_node_information import ComputeNodeInformation + from .node_agent_information import NodeAgentInformation + from .multi_instance_settings import MultiInstanceSettings + from .task_statistics import TaskStatistics + from .task_id_range import TaskIdRange + from .task_dependencies import TaskDependencies + from 
.cloud_task import CloudTask + from .task_add_parameter import TaskAddParameter + from .task_add_collection_parameter import TaskAddCollectionParameter + from .error_message import ErrorMessage + from .batch_error_detail import BatchErrorDetail + from .batch_error import BatchError, BatchErrorException + from .task_add_result import TaskAddResult + from .task_add_collection_result import TaskAddCollectionResult + from .subtask_information import SubtaskInformation + from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult + from .task_information import TaskInformation + from .start_task_information import StartTaskInformation + from .compute_node_error import ComputeNodeError + from .inbound_endpoint import InboundEndpoint + from .compute_node_endpoint_configuration import ComputeNodeEndpointConfiguration + from .compute_node import ComputeNode + from .compute_node_user import ComputeNodeUser + from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter import JobSchedulePatchParameter + from .job_schedule_update_parameter import JobScheduleUpdateParameter + from .job_disable_parameter import JobDisableParameter + from .job_terminate_parameter import JobTerminateParameter + from .job_patch_parameter import JobPatchParameter + from .job_update_parameter import JobUpdateParameter + from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter import PoolResizeParameter + from .pool_update_properties_parameter import PoolUpdatePropertiesParameter + from .pool_upgrade_os_parameter import PoolUpgradeOSParameter + from .pool_patch_parameter import PoolPatchParameter + from .task_update_parameter import TaskUpdateParameter + from .node_update_user_parameter import NodeUpdateUserParameter + from .node_reboot_parameter import NodeRebootParameter + from 
.node_reimage_parameter import NodeReimageParameter + from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter + from .node_remove_parameter import NodeRemoveParameter + from .upload_batch_service_logs_configuration import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result import UploadBatchServiceLogsResult + from .node_counts import NodeCounts + from .pool_node_counts import PoolNodeCounts + from .application_list_options import ApplicationListOptions + from .application_get_options import ApplicationGetOptions + from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions + from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options import PoolAddOptions + from .pool_list_options import PoolListOptions + from .pool_delete_options import PoolDeleteOptions + from .pool_exists_options import PoolExistsOptions + from .pool_get_options import PoolGetOptions + from .pool_patch_options import PoolPatchOptions + from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions + from .pool_resize_options import PoolResizeOptions + from .pool_stop_resize_options import PoolStopResizeOptions + from .pool_update_properties_options import PoolUpdatePropertiesOptions + from .pool_upgrade_os_options import PoolUpgradeOsOptions + from .pool_remove_nodes_options import PoolRemoveNodesOptions + from .account_list_node_agent_skus_options import AccountListNodeAgentSkusOptions + from .account_list_pool_node_counts_options import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions + from .job_delete_options import JobDeleteOptions + from .job_get_options import JobGetOptions + from .job_patch_options import JobPatchOptions + from 
.job_update_options import JobUpdateOptions + from .job_disable_options import JobDisableOptions + from .job_enable_options import JobEnableOptions + from .job_terminate_options import JobTerminateOptions + from .job_add_options import JobAddOptions + from .job_list_options import JobListOptions + from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options import JobGetTaskCountsOptions + from .certificate_add_options import CertificateAddOptions + from .certificate_list_options import CertificateListOptions + from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions + from .certificate_delete_options import CertificateDeleteOptions + from .certificate_get_options import CertificateGetOptions + from .file_delete_from_task_options import FileDeleteFromTaskOptions + from .file_get_from_task_options import FileGetFromTaskOptions + from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options import FileListFromTaskOptions + from .file_list_from_compute_node_options import FileListFromComputeNodeOptions + from .job_schedule_exists_options import JobScheduleExistsOptions + from .job_schedule_delete_options import JobScheduleDeleteOptions + from .job_schedule_get_options import JobScheduleGetOptions + from .job_schedule_patch_options import JobSchedulePatchOptions + from .job_schedule_update_options import JobScheduleUpdateOptions + from .job_schedule_disable_options import JobScheduleDisableOptions + from .job_schedule_enable_options import 
JobScheduleEnableOptions + from .job_schedule_terminate_options import JobScheduleTerminateOptions + from .job_schedule_add_options import JobScheduleAddOptions + from .job_schedule_list_options import JobScheduleListOptions + from .task_add_options import TaskAddOptions + from .task_list_options import TaskListOptions + from .task_add_collection_options import TaskAddCollectionOptions + from .task_delete_options import TaskDeleteOptions + from .task_get_options import TaskGetOptions + from .task_update_options import TaskUpdateOptions + from .task_list_subtasks_options import TaskListSubtasksOptions + from .task_terminate_options import TaskTerminateOptions + from .task_reactivate_options import TaskReactivateOptions + from .compute_node_add_user_options import ComputeNodeAddUserOptions + from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options import ComputeNodeUpdateUserOptions + from .compute_node_get_options import ComputeNodeGetOptions + from .compute_node_reboot_options import ComputeNodeRebootOptions + from .compute_node_reimage_options import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options import ComputeNodeListOptions +from .application_summary_paged import ApplicationSummaryPaged +from .pool_usage_metrics_paged import PoolUsageMetricsPaged +from .cloud_pool_paged import CloudPoolPaged +from .node_agent_sku_paged import NodeAgentSkuPaged +from .pool_node_counts_paged import PoolNodeCountsPaged +from .cloud_job_paged 
import CloudJobPaged +from .job_preparation_and_release_task_execution_information_paged import JobPreparationAndReleaseTaskExecutionInformationPaged +from .certificate_paged import CertificatePaged +from .node_file_paged import NodeFilePaged +from .cloud_job_schedule_paged import CloudJobSchedulePaged +from .cloud_task_paged import CloudTaskPaged +from .compute_node_paged import ComputeNodePaged +from .batch_service_client_enums import ( + OSType, + AccessScope, + CertificateState, + CertificateFormat, + JobAction, + DependencyAction, + AutoUserScope, + ElevationLevel, + OutputFileUploadCondition, + ComputeNodeFillType, + CertificateStoreLocation, + CertificateVisibility, + CachingType, + StorageAccountType, + InboundEndpointProtocol, + NetworkSecurityGroupRuleAccess, + PoolLifetimeOption, + OnAllTasksComplete, + OnTaskFailure, + JobScheduleState, + ErrorCategory, + JobState, + JobPreparationTaskState, + TaskExecutionResult, + JobReleaseTaskState, + PoolState, + AllocationState, + TaskState, + TaskAddStatus, + SubtaskState, + StartTaskState, + ComputeNodeState, + SchedulingState, + DisableJobOption, + ComputeNodeDeallocationOption, + ComputeNodeRebootOption, + ComputeNodeReimageOption, + DisableComputeNodeSchedulingOption, +) + +__all__ = [ + 'PoolUsageMetrics', + 'ImageReference', + 'NodeAgentSku', + 'AuthenticationTokenSettings', + 'UsageStatistics', + 'ResourceStatistics', + 'PoolStatistics', + 'JobStatistics', + 'NameValuePair', + 'DeleteCertificateError', + 'Certificate', + 'ApplicationPackageReference', + 'ApplicationSummary', + 'CertificateAddParameter', + 'FileProperties', + 'NodeFile', + 'Schedule', + 'JobConstraints', + 'ContainerRegistry', + 'TaskContainerSettings', + 'ResourceFile', + 'EnvironmentSetting', + 'ExitOptions', + 'ExitCodeMapping', + 'ExitCodeRangeMapping', + 'ExitConditions', + 'AutoUserSpecification', + 'UserIdentity', + 'LinuxUserConfiguration', + 'UserAccount', + 'TaskConstraints', + 'OutputFileBlobContainerDestination', + 
'OutputFileDestination', + 'OutputFileUploadOptions', + 'OutputFile', + 'JobManagerTask', + 'JobPreparationTask', + 'JobReleaseTask', + 'TaskSchedulingPolicy', + 'StartTask', + 'CertificateReference', + 'MetadataItem', + 'CloudServiceConfiguration', + 'OSDisk', + 'WindowsConfiguration', + 'DataDisk', + 'ContainerConfiguration', + 'VirtualMachineConfiguration', + 'NetworkSecurityGroupRule', + 'InboundNATPool', + 'PoolEndpointConfiguration', + 'NetworkConfiguration', + 'PoolSpecification', + 'AutoPoolSpecification', + 'PoolInformation', + 'JobSpecification', + 'RecentJob', + 'JobScheduleExecutionInformation', + 'JobScheduleStatistics', + 'CloudJobSchedule', + 'JobScheduleAddParameter', + 'JobSchedulingError', + 'JobExecutionInformation', + 'CloudJob', + 'JobAddParameter', + 'TaskContainerExecutionInformation', + 'TaskFailureInformation', + 'JobPreparationTaskExecutionInformation', + 'JobReleaseTaskExecutionInformation', + 'JobPreparationAndReleaseTaskExecutionInformation', + 'TaskCounts', + 'AutoScaleRunError', + 'AutoScaleRun', + 'ResizeError', + 'CloudPool', + 'PoolAddParameter', + 'AffinityInformation', + 'TaskExecutionInformation', + 'ComputeNodeInformation', + 'NodeAgentInformation', + 'MultiInstanceSettings', + 'TaskStatistics', + 'TaskIdRange', + 'TaskDependencies', + 'CloudTask', + 'TaskAddParameter', + 'TaskAddCollectionParameter', + 'ErrorMessage', + 'BatchErrorDetail', + 'BatchError', 'BatchErrorException', + 'TaskAddResult', + 'TaskAddCollectionResult', + 'SubtaskInformation', + 'CloudTaskListSubtasksResult', + 'TaskInformation', + 'StartTaskInformation', + 'ComputeNodeError', + 'InboundEndpoint', + 'ComputeNodeEndpointConfiguration', + 'ComputeNode', + 'ComputeNodeUser', + 'ComputeNodeGetRemoteLoginSettingsResult', + 'JobSchedulePatchParameter', + 'JobScheduleUpdateParameter', + 'JobDisableParameter', + 'JobTerminateParameter', + 'JobPatchParameter', + 'JobUpdateParameter', + 'PoolEnableAutoScaleParameter', + 'PoolEvaluateAutoScaleParameter', + 
'PoolResizeParameter', + 'PoolUpdatePropertiesParameter', + 'PoolUpgradeOSParameter', + 'PoolPatchParameter', + 'TaskUpdateParameter', + 'NodeUpdateUserParameter', + 'NodeRebootParameter', + 'NodeReimageParameter', + 'NodeDisableSchedulingParameter', + 'NodeRemoveParameter', + 'UploadBatchServiceLogsConfiguration', + 'UploadBatchServiceLogsResult', + 'NodeCounts', + 'PoolNodeCounts', + 'ApplicationListOptions', + 'ApplicationGetOptions', + 'PoolListUsageMetricsOptions', + 'PoolGetAllLifetimeStatisticsOptions', + 'PoolAddOptions', + 'PoolListOptions', + 'PoolDeleteOptions', + 'PoolExistsOptions', + 'PoolGetOptions', + 'PoolPatchOptions', + 'PoolDisableAutoScaleOptions', + 'PoolEnableAutoScaleOptions', + 'PoolEvaluateAutoScaleOptions', + 'PoolResizeOptions', + 'PoolStopResizeOptions', + 'PoolUpdatePropertiesOptions', + 'PoolUpgradeOsOptions', + 'PoolRemoveNodesOptions', + 'AccountListNodeAgentSkusOptions', + 'AccountListPoolNodeCountsOptions', + 'JobGetAllLifetimeStatisticsOptions', + 'JobDeleteOptions', + 'JobGetOptions', + 'JobPatchOptions', + 'JobUpdateOptions', + 'JobDisableOptions', + 'JobEnableOptions', + 'JobTerminateOptions', + 'JobAddOptions', + 'JobListOptions', + 'JobListFromJobScheduleOptions', + 'JobListPreparationAndReleaseTaskStatusOptions', + 'JobGetTaskCountsOptions', + 'CertificateAddOptions', + 'CertificateListOptions', + 'CertificateCancelDeletionOptions', + 'CertificateDeleteOptions', + 'CertificateGetOptions', + 'FileDeleteFromTaskOptions', + 'FileGetFromTaskOptions', + 'FileGetPropertiesFromTaskOptions', + 'FileDeleteFromComputeNodeOptions', + 'FileGetFromComputeNodeOptions', + 'FileGetPropertiesFromComputeNodeOptions', + 'FileListFromTaskOptions', + 'FileListFromComputeNodeOptions', + 'JobScheduleExistsOptions', + 'JobScheduleDeleteOptions', + 'JobScheduleGetOptions', + 'JobSchedulePatchOptions', + 'JobScheduleUpdateOptions', + 'JobScheduleDisableOptions', + 'JobScheduleEnableOptions', + 'JobScheduleTerminateOptions', + 
'JobScheduleAddOptions', + 'JobScheduleListOptions', + 'TaskAddOptions', + 'TaskListOptions', + 'TaskAddCollectionOptions', + 'TaskDeleteOptions', + 'TaskGetOptions', + 'TaskUpdateOptions', + 'TaskListSubtasksOptions', + 'TaskTerminateOptions', + 'TaskReactivateOptions', + 'ComputeNodeAddUserOptions', + 'ComputeNodeDeleteUserOptions', + 'ComputeNodeUpdateUserOptions', + 'ComputeNodeGetOptions', + 'ComputeNodeRebootOptions', + 'ComputeNodeReimageOptions', + 'ComputeNodeDisableSchedulingOptions', + 'ComputeNodeEnableSchedulingOptions', + 'ComputeNodeGetRemoteLoginSettingsOptions', + 'ComputeNodeGetRemoteDesktopOptions', + 'ComputeNodeUploadBatchServiceLogsOptions', + 'ComputeNodeListOptions', + 'ApplicationSummaryPaged', + 'PoolUsageMetricsPaged', + 'CloudPoolPaged', + 'NodeAgentSkuPaged', + 'PoolNodeCountsPaged', + 'CloudJobPaged', + 'JobPreparationAndReleaseTaskExecutionInformationPaged', + 'CertificatePaged', + 'NodeFilePaged', + 'CloudJobSchedulePaged', + 'CloudTaskPaged', + 'ComputeNodePaged', + 'OSType', + 'AccessScope', + 'CertificateState', + 'CertificateFormat', + 'JobAction', + 'DependencyAction', + 'AutoUserScope', + 'ElevationLevel', + 'OutputFileUploadCondition', + 'ComputeNodeFillType', + 'CertificateStoreLocation', + 'CertificateVisibility', + 'CachingType', + 'StorageAccountType', + 'InboundEndpointProtocol', + 'NetworkSecurityGroupRuleAccess', + 'PoolLifetimeOption', + 'OnAllTasksComplete', + 'OnTaskFailure', + 'JobScheduleState', + 'ErrorCategory', + 'JobState', + 'JobPreparationTaskState', + 'TaskExecutionResult', + 'JobReleaseTaskState', + 'PoolState', + 'AllocationState', + 'TaskState', + 'TaskAddStatus', + 'SubtaskState', + 'StartTaskState', + 'ComputeNodeState', + 'SchedulingState', + 'DisableJobOption', + 'ComputeNodeDeallocationOption', + 'ComputeNodeRebootOption', + 'ComputeNodeReimageOption', + 'DisableComputeNodeSchedulingOption', +] diff --git a/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options.py 
b/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options.py new file mode 100644 index 00000000..6cc1c050 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListNodeAgentSkusOptions(Model): + """Additional parameters for list_node_agent_skus operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-node-agent-skus. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListNodeAgentSkusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options_py3.py new file mode 100644 index 00000000..01d06fb1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/account_list_node_agent_skus_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListNodeAgentSkusOptions(Model): + """Additional parameters for list_node_agent_skus operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-node-agent-skus. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListNodeAgentSkusOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options.py b/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options.py new file mode 100644 index 00000000..4ad2da01 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 10) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options_py3.py new file mode 100644 index 00000000..e9f0d02b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/account_list_pool_node_counts_options_py3.py 
@@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. + :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/affinity_information.py b/azext/generated/sdk/batch/v2018_08_01/models/affinity_information.py new file mode 100644 index 00000000..206608f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/affinity_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a compute + node on which to start a task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a compute node or a task that has run previously. 
You can pass the + affinityId of a compute node to indicate that this task needs to run on + that compute node. Note that this is just a soft affinity. If the target + node is busy or unavailable at the time the task is scheduled, then the + task will be scheduled elsewhere. + :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/affinity_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/affinity_information_py3.py new file mode 100644 index 00000000..fcf8d04b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/affinity_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a compute + node on which to start a task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a compute node or a task that has run previously. You can pass the + affinityId of a compute node to indicate that this task needs to run on + that compute node. Note that this is just a soft affinity. 
If the target + node is busy or unavailable at the time the task is scheduled, then the + task will be scheduled elsewhere. + :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str, **kwargs) -> None: + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/application_get_options.py new file mode 100644 index 00000000..038c5421 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_get_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/application_get_options_py3.py new file mode 100644 index 00000000..3c9d5c0a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_get_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/application_list_options.py new file mode 100644 index 00000000..bc3ddb36 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_list_options.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. 
+ + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/application_list_options_py3.py new file mode 100644 index 00000000..445de51e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_list_options_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. + + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference.py b/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference.py new file mode 100644 index 00000000..08673617 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an application package to be deployed to compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. If omitted, the + default version is deployed. 
If this is omitted on a pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a task, and no default version is specified for this + application, the task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.version = kwargs.get('version', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference_py3.py new file mode 100644 index 00000000..dd81226b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_package_reference_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an application package to be deployed to compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. 
If omitted, the + default version is deployed. If this is omitted on a pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a task, and no default version is specified for this + application, the task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None: + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = application_id + self.version = version diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_summary.py b/azext/generated/sdk/batch/v2018_08_01/models/application_summary.py new file mode 100644 index 00000000..4a65a11f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_summary.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the account. + :type id: str + :param display_name: Required. 
The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. + :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(ApplicationSummary, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.versions = kwargs.get('versions', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_summary_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/application_summary_paged.py new file mode 100644 index 00000000..64ed9c6b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_summary_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ApplicationSummaryPaged(Paged): + """ + A paging container for iterating over a list of :class:`ApplicationSummary ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ApplicationSummary]'} + } + + def __init__(self, *args, **kwargs): + + super(ApplicationSummaryPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/application_summary_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/application_summary_py3.py new file mode 100644 index 00000000..68c838e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/application_summary_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the account. + :type id: str + :param display_name: Required. The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. 
+ :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None: + super(ApplicationSummary, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.versions = versions diff --git a/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings.py b/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings.py new file mode 100644 index 00000000..fd05e87b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the job which + contains the task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, **kwargs): + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = kwargs.get('access', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings_py3.py new file mode 100644 index 00000000..23dde60c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/authentication_token_settings_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the job which + contains the task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, *, access=None, **kwargs) -> None: + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = access diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification.py new file mode 100644 index 00000000..7383a65a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto pool when the job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a pool is automatically created. The Batch service assigns each auto + pool a unique identifier on creation. To distinguish between pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto pools, and how multiple jobs on a schedule are assigned to pools. 
+ Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto pool alive after its lifetime + expires. If false, the Batch service deletes the pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the job or job schedule completes. If true, the Batch service does not + delete the pool automatically. It is up to the user to delete auto pools + created with this option. + :type keep_alive: bool + :param pool: The pool specification for the auto pool. + :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, **kwargs): + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None) + self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None) + self.keep_alive = kwargs.get('keep_alive', None) + self.pool = kwargs.get('pool', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification_py3.py new file mode 100644 index 00000000..4b07e831 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_pool_specification_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto pool when the job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a pool is automatically created. The Batch service assigns each auto + pool a unique identifier on creation. To distinguish between pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto pools, and how multiple jobs on a schedule are assigned to pools. + Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto pool alive after its lifetime + expires. If false, the Batch service deletes the pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the job or job schedule completes. If true, the Batch service does not + delete the pool automatically. It is up to the user to delete auto pools + created with this option. + :type keep_alive: bool + :param pool: The pool specification for the auto pool. 
+ :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None: + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = auto_pool_id_prefix + self.pool_lifetime_option = pool_lifetime_option + self.keep_alive = keep_alive + self.pool = pool diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run.py new file mode 100644 index 00000000..06b0d4fe --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. 
Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. + :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = kwargs.get('timestamp', None) + self.results = kwargs.get('results', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error.py new file mode 100644 index 00000000..d0d7b163 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error_py3.py new file mode 100644 index 00000000..8ffa9805 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_error_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. 
+ :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_py3.py new file mode 100644 index 00000000..9f58e936 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_scale_run_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. 
+ :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None: + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = timestamp + self.results = results + self.error = error diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification.py new file mode 100644 index 00000000..60127c74 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a task on the Batch + service. + + :param scope: The scope for the auto user. The default value is task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, **kwargs): + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = kwargs.get('scope', None) + self.elevation_level = kwargs.get('elevation_level', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification_py3.py new file mode 100644 index 00000000..bc590d2c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/auto_user_specification_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a task on the Batch + service. + + :param scope: The scope for the auto user. The default value is task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = scope + self.elevation_level = elevation_level diff --git a/azext/generated/sdk/batch/v2018_08_01/models/batch_error.py b/azext/generated/sdk/batch/v2018_08_01/models/batch_error.py new file mode 100644 index 00000000..3857ac96 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/batch_error.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, **kwargs): + super(BatchError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail.py b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail.py new file mode 100644 index 00000000..a892678c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail_py3.py new file mode 100644 index 00000000..8aa8a85b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_detail_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azext/generated/sdk/batch/v2018_08_01/models/batch_error_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_py3.py new file mode 100644 index 00000000..a6e49569 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/batch_error_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None: + super(BatchError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/batch_service_client_enums.py b/azext/generated/sdk/batch/v2018_08_01/models/batch_service_client_enums.py new file mode 100644 index 00000000..a2c68e71 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/batch_service_client_enums.py @@ -0,0 +1,277 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class OSType(str, Enum): + + linux = "linux" #: The Linux operating system. + windows = "windows" #: The Windows operating system. + + +class AccessScope(str, Enum): + + job = "job" #: Grants access to perform all operations on the job containing the task. 
+ + +class CertificateState(str, Enum): + + active = "active" #: The certificate is available for use in pools. + deleting = "deleting" #: The user has requested that the certificate be deleted, but the delete operation has not yet completed. You may not reference the certificate when creating or updating pools. + delete_failed = "deletefailed" #: The user requested that the certificate be deleted, but there are pools that still have references to the certificate, or it is still installed on one or more compute nodes. (The latter can occur if the certificate has been removed from the pool, but the node has not yet restarted. Nodes refresh their certificates only when they restart.) You may use the cancel certificate delete operation to cancel the delete, or the delete certificate operation to retry the delete. + + +class CertificateFormat(str, Enum): + + pfx = "pfx" #: The certificate is a PFX (PKCS#12) formatted certificate or certificate chain. + cer = "cer" #: The certificate is a base64-encoded X.509 certificate. + + +class JobAction(str, Enum): + + none = "none" #: Take no action. + disable = "disable" #: Disable the job. This is equivalent to calling the disable job API, with a disableTasks value of requeue. + terminate = "terminate" #: Terminate the job. The terminateReason in the job's executionInfo is set to "TaskFailed". + + +class DependencyAction(str, Enum): + + satisfy = "satisfy" #: Satisfy the task's dependencies. + block = "block" #: Block the task's dependencies. + + +class AutoUserScope(str, Enum): + + task = "task" #: Specifies that the service should create a new user for the task. + pool = "pool" #: Specifies that the task runs as the common auto user account which is created on every node in a pool. + + +class ElevationLevel(str, Enum): + + non_admin = "nonadmin" #: The user is a standard user without elevated access. + admin = "admin" #: The user is a user with elevated access and operates with full Administrator permissions. 
+ + +class OutputFileUploadCondition(str, Enum): + + task_success = "tasksuccess" #: Upload the file(s) only after the task process exits with an exit code of 0. + task_failure = "taskfailure" #: Upload the file(s) only after the task process exits with a nonzero exit code. + task_completion = "taskcompletion" #: Upload the file(s) after the task process exits, no matter what the exit code was. + + +class ComputeNodeFillType(str, Enum): + + spread = "spread" #: Tasks should be assigned evenly across all nodes in the pool. + pack = "pack" #: As many tasks as possible (maxTasksPerNode) should be assigned to each node in the pool before any tasks are assigned to the next node in the pool. + + +class CertificateStoreLocation(str, Enum): + + current_user = "currentuser" #: Certificates should be installed to the CurrentUser certificate store. + local_machine = "localmachine" #: Certificates should be installed to the LocalMachine certificate store. + + +class CertificateVisibility(str, Enum): + + start_task = "starttask" #: The certificate should be visible to the user account under which the start task is run. + task = "task" #: The certificate should be visible to the user accounts under which job tasks are run. + remote_user = "remoteuser" #: The certificate should be visible to the user accounts under which users remotely access the node. + + +class CachingType(str, Enum): + + none = "none" #: The caching mode for the disk is not enabled. + read_only = "readonly" #: The caching mode for the disk is read only. + read_write = "readwrite" #: The caching mode for the disk is read and write. + + +class StorageAccountType(str, Enum): + + standard_lrs = "standard_lrs" #: The data disk should use standard locally redundant storage. + premium_lrs = "premium_lrs" #: The data disk should use premium locally redundant storage. + + +class InboundEndpointProtocol(str, Enum): + + tcp = "tcp" #: Use TCP for the endpoint. + udp = "udp" #: Use UDP for the endpoint. 
+ + +class NetworkSecurityGroupRuleAccess(str, Enum): + + allow = "allow" #: Allow access. + deny = "deny" #: Deny access. + + +class PoolLifetimeOption(str, Enum): + + job_schedule = "jobschedule" #: The pool exists for the lifetime of the job schedule. The Batch Service creates the pool when it creates the first job on the schedule. You may apply this option only to job schedules, not to jobs. + job = "job" #: The pool exists for the lifetime of the job to which it is dedicated. The Batch service creates the pool when it creates the job. If the 'job' option is applied to a job schedule, the Batch service creates a new auto pool for every job created on the schedule. + + +class OnAllTasksComplete(str, Enum): + + no_action = "noaction" #: Do nothing. The job remains active unless terminated or disabled by some other means. + terminate_job = "terminatejob" #: Terminate the job. The job's terminateReason is set to 'AllTasksComplete'. + + +class OnTaskFailure(str, Enum): + + no_action = "noaction" #: Do nothing. The job remains active unless terminated or disabled by some other means. + perform_exit_options_job_action = "performexitoptionsjobaction" #: Take the action associated with the task exit condition in the task's exitConditions collection. (This may still result in no action being taken, if that is what the task specifies.) + + +class JobScheduleState(str, Enum): + + active = "active" #: The job schedule is active and will create jobs as per its schedule. + completed = "completed" #: The schedule has terminated, either by reaching its end time or by the user terminating it explicitly. + disabled = "disabled" #: The user has disabled the schedule. The scheduler will not initiate any new jobs will on this schedule, but any existing active job will continue to run. + terminating = "terminating" #: The schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. 
The scheduler will not initiate any new jobs for this schedule, nor is any existing job active. + deleting = "deleting" #: The user has requested that the schedule be deleted, but the delete operation is still in progress. The scheduler will not initiate any new jobs for this schedule, and will delete any existing jobs and tasks under the schedule, including any active job. The schedule will be deleted when all jobs and tasks under the schedule have been deleted. + + +class ErrorCategory(str, Enum): + + user_error = "usererror" #: The error is due to a user issue, such as misconfiguration. + server_error = "servererror" #: The error is due to an internal server issue. + + +class JobState(str, Enum): + + active = "active" #: The job is available to have tasks scheduled. + disabling = "disabling" #: A user has requested that the job be disabled, but the disable operation is still in progress (for example, waiting for tasks to terminate). + disabled = "disabled" #: A user has disabled the job. No tasks are running, and no new tasks will be scheduled. + enabling = "enabling" #: A user has requested that the job be enabled, but the enable operation is still in progress. + terminating = "terminating" #: The job is about to complete, either because a Job Manager task has completed or because the user has terminated the job, but the terminate operation is still in progress (for example, because Job Release tasks are running). + completed = "completed" #: All tasks have terminated, and the system will not accept any more tasks or any further changes to the job. + deleting = "deleting" #: A user has requested that the job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running tasks). + + +class JobPreparationTaskState(str, Enum): + + running = "running" #: The task is currently running (including retrying). 
+ completed = "completed" #: The task has exited with exit code 0, or the task has exhausted its retry limit, or the Batch service was unable to start the task due to task preparation errors (such as resource file download failures). + + +class TaskExecutionResult(str, Enum): + + success = "success" #: The task ran successfully. + failure = "failure" #: There was an error during processing of the task. The failure may have occurred before the task process was launched, while the task process was executing, or after the task process exited. + + +class JobReleaseTaskState(str, Enum): + + running = "running" #: The task is currently running (including retrying). + completed = "completed" #: The task has exited with exit code 0, or the task has exhausted its retry limit, or the Batch service was unable to start the task due to task preparation errors (such as resource file download failures). + + +class PoolState(str, Enum): + + active = "active" #: The pool is available to run tasks subject to the availability of compute nodes. + deleting = "deleting" #: The user has requested that the pool be deleted, but the delete operation has not yet completed. + upgrading = "upgrading" #: The user has requested that the operating system of the pool's nodes be upgraded, but the upgrade operation has not yet completed (that is, some nodes in the pool have not yet been upgraded). While upgrading, the pool may be able to run tasks (with reduced capacity) but this is not guaranteed. + + +class AllocationState(str, Enum): + + steady = "steady" #: The pool is not resizing. There are no changes to the number of nodes in the pool in progress. A pool enters this state when it is created and when no operations are being performed on the pool to change the number of nodes. + resizing = "resizing" #: The pool is resizing; that is, compute nodes are being added to or removed from the pool. 
+ stopping = "stopping" #: The pool was resizing, but the user has requested that the resize be stopped; the stop request has not yet been completed. + + +class TaskState(str, Enum): + + active = "active" #: The task is queued and able to run, but is not currently assigned to a compute node. A task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run. + preparing = "preparing" #: The task has been assigned to a compute node, but is waiting for a required Job Preparation task to complete on the node. If the Job Preparation task succeeds, the task will move to running. If the Job Preparation task fails, the task will return to active and will be eligible to be assigned to a different node. + running = "running" #: The task is running on a compute node. This includes task-level preparation such as downloading resource files or deploying application packages specified on the task - it does not necessarily mean that the task command line has started executing. + completed = "completed" #: The task is no longer eligible to run, usually because the task has finished successfully, or the task has finished unsuccessfully and has exhausted its retry limit. A task is also marked as completed if an error occurred launching the task, or when the task has been terminated. + + +class TaskAddStatus(str, Enum): + + success = "success" #: The task was added successfully. + client_error = "clienterror" #: The task failed to add due to a client error and should not be retried without modifying the request as appropriate. + server_error = "servererror" #: The task failed to add due to a server error and can be retried without modification. + + +class SubtaskState(str, Enum): + + preparing = "preparing" #: The task has been assigned to a compute node, but is waiting for a required Job Preparation task to complete on the node. If the Job Preparation task succeeds, the task will move to running.
If the Job Preparation task fails, the task will return to active and will be eligible to be assigned to a different node. + running = "running" #: The task is running on a compute node. This includes task-level preparation such as downloading resource files or deploying application packages specified on the task - it does not necessarily mean that the task command line has started executing. + completed = "completed" #: The task is no longer eligible to run, usually because the task has finished successfully, or the task has finished unsuccessfully and has exhausted its retry limit. A task is also marked as completed if an error occurred launching the task, or when the task has been terminated. + + +class StartTaskState(str, Enum): + + running = "running" #: The start task is currently running. + completed = "completed" #: The start task has exited with exit code 0, or the start task has failed and the retry limit has been reached, or the start task process did not run due to task preparation errors (such as resource file download failures). + + +class ComputeNodeState(str, Enum): + + idle = "idle" #: The node is not currently running a task. + rebooting = "rebooting" #: The node is rebooting. + reimaging = "reimaging" #: The node is reimaging. + running = "running" #: The node is running one or more tasks (other than a start task). + unusable = "unusable" #: The node cannot be used for task execution due to errors. + creating = "creating" #: The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the pool. + starting = "starting" #: The Batch service is starting on the underlying virtual machine. + waiting_for_start_task = "waitingforstarttask" #: The start task has started running on the compute node, but waitForSuccess is set and the start task has not yet completed.
+ start_task_failed = "starttaskfailed" #: The start task has failed on the compute node (and exhausted all retries), and waitForSuccess is set. The node is not usable for running tasks. + unknown = "unknown" #: The Batch service has lost contact with the node, and does not know its true state. + leaving_pool = "leavingpool" #: The node is leaving the pool, either because the user explicitly removed it or because the pool is resizing or autoscaling down. + offline = "offline" #: The node is not currently running a task, and scheduling of new tasks to the node is disabled. + preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was preempted will be rescheduled when another node becomes available. + + +class SchedulingState(str, Enum): + + enabled = "enabled" #: Tasks can be scheduled on the node. + disabled = "disabled" #: No new tasks will be scheduled on the node. Tasks already running on the node may still run to completion. All nodes start with scheduling enabled. + + +class DisableJobOption(str, Enum): + + requeue = "requeue" #: Terminate running tasks and requeue them. The tasks will run again when the job is enabled. + terminate = "terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. + wait = "wait" #: Allow currently running tasks to complete. + + +class ComputeNodeDeallocationOption(str, Enum): + + requeue = "requeue" #: Terminate running task processes and requeue the tasks. The tasks will run again when a node is available. Remove nodes as soon as tasks have been terminated. + terminate = "terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove nodes as soon as tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running tasks to complete. Schedule no new tasks while waiting. 
Remove nodes when all tasks have completed. + retained_data = "retaineddata" #: Allow currently running tasks to complete, then wait for all task data retention periods to expire. Schedule no new tasks while waiting. Remove nodes when all task retention periods have expired. + + +class ComputeNodeRebootOption(str, Enum): + + requeue = "requeue" #: Terminate running task processes and requeue the tasks. The tasks will run again when a node is available. Restart the node as soon as tasks have been terminated. + terminate = "terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the node as soon as tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running tasks to complete. Schedule no new tasks while waiting. Restart the node when all tasks have completed. + retained_data = "retaineddata" #: Allow currently running tasks to complete, then wait for all task data retention periods to expire. Schedule no new tasks while waiting. Restart the node when all task retention periods have expired. + + +class ComputeNodeReimageOption(str, Enum): + + requeue = "requeue" #: Terminate running task processes and requeue the tasks. The tasks will run again when a node is available. Reimage the node as soon as tasks have been terminated. + terminate = "terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the node as soon as tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running tasks to complete. Schedule no new tasks while waiting. Reimage the node when all tasks have completed. + retained_data = "retaineddata" #: Allow currently running tasks to complete, then wait for all task data retention periods to expire. Schedule no new tasks while waiting. Reimage the node when all task retention periods have expired. 
+ + +class DisableComputeNodeSchedulingOption(str, Enum): + + requeue = "requeue" #: Terminate running task processes and requeue the tasks. The tasks may run again on other compute nodes, or when task scheduling is re-enabled on this node. Enter offline state as soon as tasks have been terminated. + terminate = "terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running tasks to complete. Schedule no new tasks while waiting. Enter offline state when all tasks have completed. diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate.py new file mode 100644 index 00000000..e44c3f85 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A certificate that can be installed on compute nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the certificate. + :type url: str + :param state: The current state of the certificate. 
Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the certificate. This + property is not set if the certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the certificate + entered its previous state. This property is not set if the certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this certificate. This property is set only if the + certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, **kwargs): + super(Certificate, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.public_data = kwargs.get('public_data', None) + self.delete_certificate_error = kwargs.get('delete_certificate_error', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options.py new file mode 100644 index 00000000..f2c8d5bb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options_py3.py new file mode 100644 index 00000000..c7d61b36 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter.py new file mode 100644 index 00000000..809efd66 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A certificate that can be installed on compute nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The X.509 thumbprint of the certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the certificate's private key. + This is required if the certificate format is pfx. It should be omitted if + the certificate format is cer. 
+ :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.data = kwargs.get('data', None) + self.certificate_format = kwargs.get('certificate_format', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter_py3.py new file mode 100644 index 00000000..2a560b2b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_add_parameter_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A certificate that can be installed on compute nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. 
The X.509 thumbprint of the certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the certificate's private key. + This is required if the certificate format is pfx. It should be omitted if + the certificate format is cer. + :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None: + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.data = data + self.certificate_format = certificate_format + self.password = password diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options.py new file mode 100644 index 00000000..5c7c936c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options.py @@ -0,0 +1,46 @@ 
+# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options_py3.py new file mode 100644 index 00000000..8afbcf24 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_cancel_deletion_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options.py new file mode 100644 index 00000000..5ff7ee83 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options_py3.py new file mode 100644 index 00000000..47f91b10 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_delete_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options.py new file mode 100644 index 00000000..2b474c17 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options_py3.py new file mode 100644 index 00000000..4bd6bb70 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. 
+ + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options.py new file mode 100644 index 00000000..cb3134af --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options_py3.py new file mode 100644 index 00000000..461b8044 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_paged.py 
b/azext/generated/sdk/batch/v2018_08_01/models/certificate_paged.py new file mode 100644 index 00000000..985d7838 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CertificatePaged(Paged): + """ + A paging container for iterating over a list of :class:`Certificate ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Certificate]'} + } + + def __init__(self, *args, **kwargs): + + super(CertificatePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_py3.py new file mode 100644 index 00000000..4e0b71c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A certificate that can be installed on compute nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the certificate. + :type url: str + :param state: The current state of the certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the certificate. This + property is not set if the certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the certificate + entered its previous state. This property is not set if the certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this certificate. This property is set only if the + certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None: + super(Certificate, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.url = url + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.public_data = public_data + self.delete_certificate_error = delete_certificate_error diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference.py new file mode 100644 index 00000000..976c1908 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentuser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only + for pools configured with Windows nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user accounts on the compute node should have + access to the private data of the certificate. You can specify more than + one visibility in this collection. The default is all accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, **kwargs): + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.store_location = kwargs.get('store_location', None) + self.store_name = kwargs.get('store_name', None) + self.visibility = kwargs.get('visibility', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference_py3.py new file mode 100644 index 00000000..46e52e27 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/certificate_reference_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentuser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only + for pools configured with Windows nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user accounts on the compute node should have + access to the private data of the certificate. You can specify more than + one visibility in this collection. The default is all accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.store_location = store_location + self.store_name = store_name + self.visibility = visibility diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job.py new file mode 100644 index 00000000..4e6cafbf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch job. + + :param id: A string that uniquely identifies the job within the account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the job. + :type display_name: str + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the job. + :type url: str + :param e_tag: The ETag of the job. This is an opaque string. You can use + it to detect whether the job has changed between requests. In particular, + you can be pass the ETag when updating a job to specify that your changes + should take effect only if nobody else has modified the job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the job. This is the last + time at which the job level data, such as the job state or priority, + changed. It does not factor in task-level changes such as adding new tasks + or tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the job. + :type creation_time: datetime + :param state: The current state of the job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the job entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the job. This property is not + set if the job is in its initial Active state. 
Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the job entered + its previous state. This property is not set if the job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager task to be launched when + the job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task. The Job Preparation + task is a special task run on each node before any other task of the job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task. The Job Release task is a + special task run at the end of the job on each node that has run any other + task of the job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all tasks in + the job (including the Job Manager, Job Preparation and Job Release + tasks). Individual tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The pool settings associated with the job. 
+ :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task in the job fails. A task is considered to have failed if has a + failureInfo. A failureInfo is set if the task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + job. The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. 
+ :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = 
kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.metadata = kwargs.get('metadata', None) + self.execution_info = kwargs.get('execution_info', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_paged.py new file mode 100644 index 00000000..c642458f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudJobPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudJob ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudJob]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudJobPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_py3.py new file mode 100644 index 00000000..226cb3bb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_py3.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch job. + + :param id: A string that uniquely identifies the job within the account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the job. + :type display_name: str + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the job. + :type url: str + :param e_tag: The ETag of the job. This is an opaque string. You can use + it to detect whether the job has changed between requests. 
In particular, + you can be pass the ETag when updating a job to specify that your changes + should take effect only if nobody else has modified the job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the job. This is the last + time at which the job level data, such as the job state or priority, + changed. It does not factor in task-level changes such as adding new tasks + or tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the job. + :type creation_time: datetime + :param state: The current state of the job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the job entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the job. This property is not + set if the job is in its initial Active state. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the job entered + its previous state. This property is not set if the job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager task to be launched when + the job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task. 
The Job Preparation + task is a special task run on each node before any other task of the job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task. The Job Release task is a + special task run at the end of the job on each node that has run any other + task of the job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all tasks in + the job (including the Job Manager, Job Preparation and Job Release + tasks). Individual tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The pool settings associated with the job. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task in the job fails. A task is considered to have failed if has a + failureInfo. A failureInfo is set if the task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + job. The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. + :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 
'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: + super(CloudJob, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.metadata = metadata + self.execution_info = execution_info + self.stats = stats diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule.py new file mode 100644 index 00000000..1a2a33fd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJobSchedule(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + :param id: A string that uniquely identifies the schedule within the + account. + :type id: str + :param display_name: The display name for the schedule. + :type display_name: str + :param url: The URL of the job schedule. + :type url: str + :param e_tag: The ETag of the job schedule. This is an opaque string. You + can use it to detect whether the job schedule has changed between + requests. In particular, you can be pass the ETag with an Update Job + Schedule request to specify that your changes should take effect only if + nobody else has modified the schedule in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the job schedule. This is + the last time at which the schedule level data, such as the job + specification or recurrence information, changed. It does not factor in + job-level changes such as new jobs being created or jobs changing state. + :type last_modified: datetime + :param creation_time: The creation time of the job schedule. + :type creation_time: datetime + :param state: The current state of the job schedule. Possible values + include: 'active', 'completed', 'disabled', 'terminating', 'deleting' + :type state: str or ~azure.batch.models.JobScheduleState + :param state_transition_time: The time at which the job schedule entered + the current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the job schedule. 
This + property is not present if the job schedule is in its initial active + state. Possible values include: 'active', 'completed', 'disabled', + 'terminating', 'deleting' + :type previous_state: str or ~azure.batch.models.JobScheduleState + :param previous_state_transition_time: The time at which the job schedule + entered its previous state. This property is not present if the job + schedule is in its initial active state. + :type previous_state_transition_time: datetime + :param schedule: The schedule according to which jobs will be created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the jobs to be created on this + schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param execution_info: Information about jobs that have been and will be + run under this schedule. + :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: The lifetime resource usage statistics for the job schedule. + The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. 
+ :type stats: ~azure.batch.models.JobScheduleStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobScheduleState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudJobSchedule, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.execution_info = kwargs.get('execution_info', None) + self.metadata = kwargs.get('metadata', None) + self.stats = kwargs.get('stats', None) diff --git 
a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_paged.py new file mode 100644 index 00000000..3abb6f15 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudJobSchedulePaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudJobSchedule ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudJobSchedule]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudJobSchedulePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_py3.py new file mode 100644 index 00000000..1542fe56 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_job_schedule_py3.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJobSchedule(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + :param id: A string that uniquely identifies the schedule within the + account. + :type id: str + :param display_name: The display name for the schedule. + :type display_name: str + :param url: The URL of the job schedule. + :type url: str + :param e_tag: The ETag of the job schedule. This is an opaque string. You + can use it to detect whether the job schedule has changed between + requests. In particular, you can be pass the ETag with an Update Job + Schedule request to specify that your changes should take effect only if + nobody else has modified the schedule in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the job schedule. This is + the last time at which the schedule level data, such as the job + specification or recurrence information, changed. It does not factor in + job-level changes such as new jobs being created or jobs changing state. + :type last_modified: datetime + :param creation_time: The creation time of the job schedule. + :type creation_time: datetime + :param state: The current state of the job schedule. Possible values + include: 'active', 'completed', 'disabled', 'terminating', 'deleting' + :type state: str or ~azure.batch.models.JobScheduleState + :param state_transition_time: The time at which the job schedule entered + the current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the job schedule. This + property is not present if the job schedule is in its initial active + state. 
Possible values include: 'active', 'completed', 'disabled', + 'terminating', 'deleting' + :type previous_state: str or ~azure.batch.models.JobScheduleState + :param previous_state_transition_time: The time at which the job schedule + entered its previous state. This property is not present if the job + schedule is in its initial active state. + :type previous_state_transition_time: datetime + :param schedule: The schedule according to which jobs will be created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the jobs to be created on this + schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param execution_info: Information about jobs that have been and will be + run under this schedule. + :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: The lifetime resource usage statistics for the job schedule. + The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. 
+ :type stats: ~azure.batch.models.JobScheduleStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobScheduleState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None, **kwargs) -> None: + super(CloudJobSchedule, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.schedule = schedule + self.job_specification = job_specification + self.execution_info = execution_info + self.metadata = metadata + self.stats = stats diff --git 
a/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool.py new file mode 100644 index 00000000..8d85502d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudPool(Model): + """A pool in the Azure Batch service. + + :param id: A string that uniquely identifies the pool within the account. + The ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. The + ID is case-preserving and case-insensitive (that is, you may not have two + IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the pool. + :type url: str + :param e_tag: The ETag of the pool. This is an opaque string. You can use + it to detect whether the pool has changed between requests. In particular, + you can be pass the ETag when updating a pool to specify that your changes + should take effect only if nobody else has modified the pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. 
It does not factor in node-level + changes such as a compute node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the pool. + :type creation_time: datetime + :param state: The current state of the pool. Possible values include: + 'active', 'deleting', 'upgrading' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All virtual + machines in a pool are the same size. For information about available + sizes of virtual machines in pools, see Choose a VM size for compute nodes + in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. 
This is the timeout for the most recent resize operation. (The + initial sizing when the pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the pool. This property is set only if one or more errors + occurred during the last pool resize, and only when the pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated compute nodes + currently in the pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority compute + nodes currently in the pool. Low-priority compute nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + This property is set only if the pool automatically scales, i.e. + enableAutoScale is true. 
+ :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Specifying this value can reduce the chance of + the requested number of nodes to be allocated in the pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. 
The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the pool. The statistics may not be immediately available. The + Batch service performs periodic roll-up of statistics. The typical delay + is about 30 minutes. 
+ :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 
'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudPool, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.allocation_state = kwargs.get('allocation_state', None) + self.allocation_state_transition_time = kwargs.get('allocation_state_transition_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.resize_errors = kwargs.get('resize_errors', None) + self.current_dedicated_nodes = kwargs.get('current_dedicated_nodes', None) + self.current_low_priority_nodes = kwargs.get('current_low_priority_nodes', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = 
kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.auto_scale_run = kwargs.get('auto_scale_run', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool_paged.py new file mode 100644 index 00000000..c23eb7cd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_pool_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class CloudPoolPaged(Paged):
    """A paging container for iterating over a list of
    :class:`CloudPool <azure.batch.models.CloudPool>` objects.
    """

    _attribute_map = {
        # The OData continuation key contains a literal dot; the escaped
        # backslash keeps msrest from treating 'odata.nextLink' as a
        # nested-key path.
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[CloudPool]'}
    }

    def __init__(self, *args, **kwargs):

        super(CloudPoolPaged, self).__init__(*args, **kwargs)
+ :type url: str + :param e_tag: The ETag of the pool. This is an opaque string. You can use + it to detect whether the pool has changed between requests. In particular, + you can be pass the ETag when updating a pool to specify that your changes + should take effect only if nobody else has modified the pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. It does not factor in node-level + changes such as a compute node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the pool. + :type creation_time: datetime + :param state: The current state of the pool. Possible values include: + 'active', 'deleting', 'upgrading' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All virtual + machines in a pool are the same size. For information about available + sizes of virtual machines in pools, see Choose a VM size for compute nodes + in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. 
This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This is the timeout for the most recent resize operation. (The + initial sizing when the pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the pool. This property is set only if one or more errors + occurred during the last pool resize, and only when the pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated compute nodes + currently in the pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority compute + nodes currently in the pool. Low-priority compute nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + This property is set only if the pool automatically scales, i.e. + enableAutoScale is true. + :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Specifying this value can reduce the chance of + the requested number of nodes to be allocated in the pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. 
For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the pool. The statistics may not be immediately available. The + Batch service performs periodic roll-up of statistics. The typical delay + is about 30 minutes. 
+ :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 
'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_errors=None, current_dedicated_nodes: int=None, current_low_priority_nodes: int=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, stats=None, **kwargs) -> None: + super(CloudPool, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.allocation_state = allocation_state + self.allocation_state_transition_time = allocation_state_transition_time + self.vm_size = vm_size + self.cloud_service_configuration = 
cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.resize_errors = resize_errors + self.current_dedicated_nodes = current_dedicated_nodes + self.current_low_priority_nodes = current_low_priority_nodes + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.auto_scale_run = auto_scale_run + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata + self.stats = stats diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_service_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_service_configuration.py new file mode 100644 index 00000000..d86bae40 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_service_configuration.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class CloudServiceConfiguration(Model):
    """The configuration for nodes in a pool based on the Azure Cloud Services
    platform.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param os_family: Required. The Azure Guest OS family to be installed on
     the virtual machines in the pool. Possible values are:
     2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1.
     3 - OS Family 3, equivalent to Windows Server 2012.
     4 - OS Family 4, equivalent to Windows Server 2012 R2.
     5 - OS Family 5, equivalent to Windows Server 2016. For more information,
     see Azure Guest OS Releases
     (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
    :type os_family: str
    :param target_os_version: The Azure Guest OS version to be installed on
     the virtual machines in the pool. The default value is * which specifies
     the latest operating system version for the specified OS family.
    :type target_os_version: str
    :ivar current_os_version: The Azure Guest OS Version currently installed
     on the virtual machines in the pool. This may differ from targetOSVersion
     if the pool state is Upgrading. In this case some virtual machines may be
     on the targetOSVersion and some may be on the currentOSVersion during the
     upgrade process. Once all virtual machines have upgraded, currentOSVersion
     is updated to be the same as targetOSVersion.
    :vartype current_os_version: str
    """

    _validation = {
        'os_family': {'required': True},
        'current_os_version': {'readonly': True},
    }

    _attribute_map = {
        'os_family': {'key': 'osFamily', 'type': 'str'},
        'target_os_version': {'key': 'targetOSVersion', 'type': 'str'},
        'current_os_version': {'key': 'currentOSVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CloudServiceConfiguration, self).__init__(**kwargs)
        self.os_family = kwargs.get('os_family', None)
        self.target_os_version = kwargs.get('target_os_version', None)
        # Read-only per _validation: populated by the Batch service and
        # never accepted from the caller, hence always initialized to None.
        self.current_os_version = None
+ 3 - OS Family 3, equivalent to Windows Server 2012. + 4 - OS Family 4, equivalent to Windows Server 2012 R2. + 5 - OS Family 5, equivalent to Windows Server 2016. For more information, + see Azure Guest OS Releases + (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + :type os_family: str + :param target_os_version: The Azure Guest OS version to be installed on + the virtual machines in the pool. The default value is * which specifies + the latest operating system version for the specified OS family. + :type target_os_version: str + :ivar current_os_version: The Azure Guest OS Version currently installed + on the virtual machines in the pool. This may differ from targetOSVersion + if the pool state is Upgrading. In this case some virtual machines may be + on the targetOSVersion and some may be on the currentOSVersion during the + upgrade process. Once all virtual machines have upgraded, currentOSVersion + is updated to be the same as targetOSVersion. 
class CloudTask(Model):
    """An Azure Batch task.

    Batch will retry tasks when a recovery operation is triggered on a compute
    node. Examples of recovery operations include (but are not limited to)
    when an unhealthy compute node is rebooted or a compute node disappeared
    due to host failure. Retries due to recovery operations are independent of
    and are not counted against the maxTaskRetryCount. Even if the
    maxTaskRetryCount is 0, an internal retry due to a recovery operation may
    occur. Because of this, all tasks should be idempotent. This means tasks
    need to tolerate being interrupted and restarted without causing any
    corruption or duplicate data. The best practice for long running tasks is
    to use some form of checkpointing.

    :param id: A string that uniquely identifies the task within the job. The
     ID can contain any combination of alphanumeric characters including
     hyphens and underscores, and cannot contain more than 64 characters.
    :type id: str
    :param display_name: A display name for the task. The display name need
     not be unique and can contain any Unicode characters up to a maximum
     length of 1024.
    :type display_name: str
    :param url: The URL of the task.
    :type url: str
    :param e_tag: The ETag of the task. This is an opaque string. You can use
     it to detect whether the task has changed between requests. In
     particular, you can pass the ETag when updating a task to specify that
     your changes should take effect only if nobody else has modified the
     task in the meantime.
    :type e_tag: str
    :param last_modified: The last modified time of the task.
    :type last_modified: datetime
    :param creation_time: The creation time of the task.
    :type creation_time: datetime
    :param exit_conditions: How the Batch service should respond when the
     task completes.
    :type exit_conditions: ~azure.batch.models.ExitConditions
    :param state: The current state of the task. Possible values include:
     'active', 'preparing', 'running', 'completed'
    :type state: str or ~azure.batch.models.TaskState
    :param state_transition_time: The time at which the task entered its
     current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the task. This property is
     not set if the task is in its initial Active state. Possible values
     include: 'active', 'preparing', 'running', 'completed'
    :type previous_state: str or ~azure.batch.models.TaskState
    :param previous_state_transition_time: The time at which the task entered
     its previous state. This property is not set if the task is in its
     initial Active state.
    :type previous_state_transition_time: datetime
    :param command_line: The command line of the task. For multi-instance
     tasks, the command line is executed as the primary task, after the
     primary task and all subtasks have finished executing the coordination
     command line. The command line does not run under a shell, and therefore
     cannot take advantage of shell features such as environment variable
     expansion. If you want to take advantage of such features, you should
     invoke the shell in the command line, for example using "cmd /c
     MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command
     line refers to file paths, it should use a relative path (relative to
     the task working directory), or use the Batch provided environment
     variable
     (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
    :type command_line: str
    :param container_settings: The settings for the container under which the
     task runs. If the pool that will run this task has
     containerConfiguration set, this must be set as well. If the pool that
     will run this task doesn't have containerConfiguration set, this must
     not be set. When this is specified, all directories recursively below
     the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the
     node) are mapped into the container, all task environment variables are
     mapped into the container, and the task command line is executed in the
     container.
    :type container_settings: ~azure.batch.models.TaskContainerSettings
    :param resource_files: A list of files that the Batch service will
     download to the compute node before running the command line. For
     multi-instance tasks, the resource files will only be downloaded to the
     compute node on which the primary task is executed. There is a maximum
     size for the list of resource files. When the max size is exceeded, the
     request will fail and the response error code will be
     RequestEntityTooLarge. If this occurs, the collection of ResourceFiles
     must be reduced in size. This can be achieved using .zip files,
     Application Packages, or Docker Containers.
    :type resource_files: list[~azure.batch.models.ResourceFile]
    :param output_files: A list of files that the Batch service will upload
     from the compute node after running the command line. For multi-instance
     tasks, the files will only be uploaded from the compute node on which
     the primary task is executed.
    :type output_files: list[~azure.batch.models.OutputFile]
    :param environment_settings: A list of environment variable settings for
     the task.
    :type environment_settings: list[~azure.batch.models.EnvironmentSetting]
    :param affinity_info: A locality hint that can be used by the Batch
     service to select a compute node on which to start the new task.
    :type affinity_info: ~azure.batch.models.AffinityInformation
    :param constraints: The execution constraints that apply to this task.
    :type constraints: ~azure.batch.models.TaskConstraints
    :param user_identity: The user identity under which the task runs. If
     omitted, the task runs as a non-administrative user unique to the task.
    :type user_identity: ~azure.batch.models.UserIdentity
    :param execution_info: Information about the execution of the task.
    :type execution_info: ~azure.batch.models.TaskExecutionInformation
    :param node_info: Information about the compute node on which the task
     ran.
    :type node_info: ~azure.batch.models.ComputeNodeInformation
    :param multi_instance_settings: An object that indicates that the task is
     a multi-instance task, and contains information about how to run the
     multi-instance task.
    :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings
    :param stats: Resource usage statistics for the task.
    :type stats: ~azure.batch.models.TaskStatistics
    :param depends_on: The tasks that this task depends on. This task will
     not be scheduled until all tasks that it depends on have completed
     successfully. If any of those tasks fail and exhaust their retry counts,
     this task will never be scheduled.
    :type depends_on: ~azure.batch.models.TaskDependencies
    :param application_package_references: A list of application packages
     that the Batch service will deploy to the compute node before running
     the command line. Application packages are downloaded and deployed to a
     shared directory, not the task working directory. Therefore, if a
     referenced package is already on the compute node, and is up to date,
     then it is not re-downloaded; the existing copy on the compute node is
     used. If a referenced application package cannot be installed, for
     example because the package has been deleted or because download failed,
     the task fails.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param authentication_token_settings: The settings for an authentication
     token that the task can use to perform Batch service operations. If this
     property is set, the Batch service provides the task with an
     authentication token which can be used to authenticate Batch service
     operations without requiring an account access key. The token is
     provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The
     operations that the task can carry out using the token depend on the
     settings. For example, a task can request job permissions in order to
     add other tasks to the job, or check the status of the job or of other
     tasks under the job.
    :type authentication_token_settings:
     ~azure.batch.models.AuthenticationTokenSettings
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
        'state': {'key': 'state', 'type': 'TaskState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'TaskState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'command_line': {'key': 'commandLine', 'type': 'str'},
        'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
        'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
        'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
        'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
        'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
        'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
        'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
        'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'},
        'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
        'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
        'stats': {'key': 'stats', 'type': 'TaskStatistics'},
        'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
    }

    def __init__(self, **kwargs):
        # Every attribute is optional at construction time; unset values stay
        # None and are omitted by msrest during serialization.
        super(CloudTask, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.display_name = kwargs.get('display_name', None)
        self.url = kwargs.get('url', None)
        self.e_tag = kwargs.get('e_tag', None)
        self.last_modified = kwargs.get('last_modified', None)
        self.creation_time = kwargs.get('creation_time', None)
        self.exit_conditions = kwargs.get('exit_conditions', None)
        self.state = kwargs.get('state', None)
        self.state_transition_time = kwargs.get('state_transition_time', None)
        self.previous_state = kwargs.get('previous_state', None)
        self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None)
        self.command_line = kwargs.get('command_line', None)
        self.container_settings = kwargs.get('container_settings', None)
        self.resource_files = kwargs.get('resource_files', None)
        self.output_files = kwargs.get('output_files', None)
        self.environment_settings = kwargs.get('environment_settings', None)
        self.affinity_info = kwargs.get('affinity_info', None)
        self.constraints = kwargs.get('constraints', None)
        self.user_identity = kwargs.get('user_identity', None)
        self.execution_info = kwargs.get('execution_info', None)
        self.node_info = kwargs.get('node_info', None)
        self.multi_instance_settings = kwargs.get('multi_instance_settings', None)
        self.stats = kwargs.get('stats', None)
        self.depends_on = kwargs.get('depends_on', None)
        self.application_package_references = kwargs.get('application_package_references', None)
        self.authentication_token_settings = kwargs.get('authentication_token_settings', None)
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, **kwargs): + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_list_subtasks_result_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_list_subtasks_result_py3.py new file mode 100644 index 00000000..f21e7260 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_list_subtasks_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a task. + + :param value: The list of subtasks. 
+ :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_paged.py new file mode 100644 index 00000000..3d8ef774 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudTaskPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudTask ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudTask]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudTaskPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_py3.py new file mode 100644 index 00000000..95928544 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/cloud_task_py3.py @@ -0,0 +1,208 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch task. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the task within the job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the task. + :type url: str + :param e_tag: The ETag of the task. This is an opaque string. You can use + it to detect whether the task has changed between requests. In particular, + you can be pass the ETag when updating a task to specify that your changes + should take effect only if nobody else has modified the task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. 
+ :type last_modified: datetime + :param creation_time: The creation time of the task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the task. This property is + not set if the task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the task entered + its previous state. This property is not set if the task is in its initial + Active state. + :type previous_state_transition_time: datetime + :param command_line: The command line of the task. For multi-instance + tasks, the command line is executed as the primary task, after the primary + task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + task runs. 
If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the compute node on which the task + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the task. + :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The tasks that this task depends on. This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(CloudTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.exit_conditions = exit_conditions + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + 
self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.execution_info = execution_info + self.node_info = node_info + self.multi_instance_settings = multi_instance_settings + self.stats = stats + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node.py new file mode 100644 index 00000000..691adc5b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A compute node in the Batch service. + + :param id: The ID of the compute node. Every node that is added to a pool + is assigned a unique ID. Whenever a node is removed from a pool, all of + its local files are deleted, and the ID is reclaimed and could be reused + for new nodes. + :type id: str + :param url: The URL of the compute node. + :type url: str + :param state: The current state of the compute node. The low-priority node + has been preempted. Tasks which were running on the node when it was + preempted will be rescheduled when another node becomes available. 
+ Possible values include: 'idle', 'rebooting', 'reimaging', 'running', + 'unusable', 'creating', 'starting', 'waitingForStartTask', + 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the compute node is available for task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the compute node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The time at which the compute node was started. + This property may not be present if the node state is unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this compute node was allocated + to the pool. + :type allocation_time: datetime + :param ip_address: The IP address that other compute nodes can use to + communicate with this compute node. Every node that is added to a pool is + assigned a unique IP address. Whenever a node is removed from a pool, all + of its local files are deleted, and the IP address is reclaimed and could + be reused for new nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a task + to request that the task be scheduled on this node. Note that this is just + a soft affinity. If the target node is busy or unavailable at the time the + task is scheduled, then the task will be scheduled elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the compute node. + For information about available sizes of virtual machines in pools, see + Choose a VM size for compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of job tasks completed on the + compute node. 
This includes Job Manager tasks and normal tasks, but not + Job Preparation, Job Release or Start tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running job + tasks on the compute node. This includes Job Manager tasks and normal + tasks, but not Job Preparation, Job Release or Start tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of job tasks which + completed successfully (with exitCode 0) on the compute node. This + includes Job Manager tasks and normal tasks, but not Job Preparation, Job + Release or Start tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of tasks whose state has recently changed. + This property is present only if at least one task has run on this node + since it was assigned to the pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The task specified to run on the compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start task on the compute node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of certificates installed on the + compute node. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. 
+ :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the compute node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this compute node is a dedicated node. If + false, the node is a low-priority node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the compute + node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the node agent version and the + time the node upgraded to a new version. + :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': 
{'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, **kwargs): + super(ComputeNode, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.scheduling_state = kwargs.get('scheduling_state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.last_boot_time = kwargs.get('last_boot_time', None) + self.allocation_time = kwargs.get('allocation_time', None) + self.ip_address = kwargs.get('ip_address', None) + self.affinity_id = kwargs.get('affinity_id', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_tasks_run = kwargs.get('total_tasks_run', None) + self.running_tasks_count = kwargs.get('running_tasks_count', None) + self.total_tasks_succeeded = kwargs.get('total_tasks_succeeded', None) + self.recent_tasks = kwargs.get('recent_tasks', None) + self.start_task = kwargs.get('start_task', None) + self.start_task_info = kwargs.get('start_task_info', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.errors = kwargs.get('errors', None) + self.is_dedicated = kwargs.get('is_dedicated', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) + self.node_agent_info = kwargs.get('node_agent_info', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options.py new file mode 100644 index 00000000..89020475 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options_py3.py new file mode 100644 index 00000000..dab4040b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_add_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options.py new file mode 100644 index 00000000..4874a98a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options_py3.py new file mode 100644 index 00000000..88217b93 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_delete_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options.py new file mode 100644 index 00000000..92bf2911 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options_py3.py new file mode 100644 index 00000000..0432c5db --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_disable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options.py new file mode 100644 index 00000000..905e3e34 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options_py3.py new file mode 100644 index 00000000..4ef5d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_enable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration.py new file mode 100644 index 00000000..922c5c10 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the compute node. 
+ + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the compute node. + :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = kwargs.get('inbound_endpoints', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration_py3.py new file mode 100644 index 00000000..72dc202e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_endpoint_configuration_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the compute node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the compute node. 
+ :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, *, inbound_endpoints, **kwargs) -> None: + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = inbound_endpoints diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error.py new file mode 100644 index 00000000..e02a6681 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a compute node. + + :param code: An identifier for the compute node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the compute node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + compute node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.error_details = kwargs.get('error_details', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error_py3.py new file mode 100644 index 00000000..53a6871b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a compute node. + + :param code: An identifier for the compute node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the compute node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + compute node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None: + super(ComputeNodeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.error_details = error_details diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options.py new file mode 100644 index 00000000..6218d444 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options_py3.py new file mode 100644 index 00000000..de6284b3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options.py new file mode 100644 index 00000000..20af5558 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options_py3.py new file mode 100644 index 00000000..d79ce622 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_desktop_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options.py new file mode 100644 index 00000000..9c01ed5f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options_py3.py new file mode 100644 index 00000000..2d7987ab --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result.py new file mode 100644 index 00000000..2d4e378c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the compute node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + compute node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = kwargs.get('remote_login_ip_address', None) + self.remote_login_port = kwargs.get('remote_login_port', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result_py3.py new file mode 100644 index 00000000..c13ace1c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_get_remote_login_settings_result_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the compute node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + compute node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = remote_login_ip_address + self.remote_login_port = remote_login_port diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information.py new file mode 100644 index 00000000..9c6a3421 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the compute node on which a task ran. + + :param affinity_id: An identifier for the compute node on which the task + ran, which can be passed when adding a task to request that the task be + scheduled on this compute node. + :type affinity_id: str + :param node_url: The URL of the node on which the task ran. . + :type node_url: str + :param pool_id: The ID of the pool on which the task ran. + :type pool_id: str + :param node_id: The ID of the node on which the task ran. 
+ :type node_id: str + :param task_root_directory: The root directory of the task on the compute + node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the task + on the compute node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) + self.node_url = kwargs.get('node_url', None) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information_py3.py new file mode 100644 index 00000000..7c82bde1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_information_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the compute node on which a task ran. + + :param affinity_id: An identifier for the compute node on which the task + ran, which can be passed when adding a task to request that the task be + scheduled on this compute node. + :type affinity_id: str + :param node_url: The URL of the node on which the task ran. . + :type node_url: str + :param pool_id: The ID of the pool on which the task ran. + :type pool_id: str + :param node_id: The ID of the node on which the task ran. + :type node_id: str + :param task_root_directory: The root directory of the task on the compute + node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the task + on the compute node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str=None, node_url: str=None, pool_id: str=None, node_id: str=None, task_root_directory: str=None, task_root_directory_url: str=None, **kwargs) -> None: + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id + self.node_url = node_url + self.pool_id = pool_id + self.node_id = node_id + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options.py new file mode 100644 index 00000000..b3cf782f --- /dev/null 
+++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options_py3.py new file mode 100644 index 00000000..e046429d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_paged.py 
b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_paged.py new file mode 100644 index 00000000..26f41dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ComputeNodePaged(Paged): + """ + A paging container for iterating over a list of :class:`ComputeNode ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ComputeNode]'} + } + + def __init__(self, *args, **kwargs): + + super(ComputeNodePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_py3.py new file mode 100644 index 00000000..7b9c41a9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_py3.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A compute node in the Batch service. + + :param id: The ID of the compute node. Every node that is added to a pool + is assigned a unique ID. Whenever a node is removed from a pool, all of + its local files are deleted, and the ID is reclaimed and could be reused + for new nodes. + :type id: str + :param url: The URL of the compute node. + :type url: str + :param state: The current state of the compute node. The low-priority node + has been preempted. Tasks which were running on the node when it was + preempted will be rescheduled when another node becomes available. + Possible values include: 'idle', 'rebooting', 'reimaging', 'running', + 'unusable', 'creating', 'starting', 'waitingForStartTask', + 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the compute node is available for task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the compute node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The time at which the compute node was started. + This property may not be present if the node state is unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this compute node was allocated + to the pool. + :type allocation_time: datetime + :param ip_address: The IP address that other compute nodes can use to + communicate with this compute node. Every node that is added to a pool is + assigned a unique IP address. Whenever a node is removed from a pool, all + of its local files are deleted, and the IP address is reclaimed and could + be reused for new nodes. 
+ :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a task + to request that the task be scheduled on this node. Note that this is just + a soft affinity. If the target node is busy or unavailable at the time the + task is scheduled, then the task will be scheduled elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the compute node. + For information about available sizes of virtual machines in pools, see + Choose a VM size for compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of job tasks completed on the + compute node. This includes Job Manager tasks and normal tasks, but not + Job Preparation, Job Release or Start tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running job + tasks on the compute node. This includes Job Manager tasks and normal + tasks, but not Job Preparation, Job Release or Start tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of job tasks which + completed successfully (with exitCode 0) on the compute node. This + includes Job Manager tasks and normal tasks, but not Job Preparation, Job + Release or Start tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of tasks whose state has recently changed. + This property is present only if at least one task has run on this node + since it was assigned to the pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The task specified to run on the compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start task on the compute node. 
+ :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of certificates installed on the + compute node. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the compute node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this compute node is a dedicated node. If + false, the node is a low-priority node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the compute + node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the node agent version and the + time the node upgraded to a new version. 
+ :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, **kwargs) -> 
None: + super(ComputeNode, self).__init__(**kwargs) + self.id = id + self.url = url + self.state = state + self.scheduling_state = scheduling_state + self.state_transition_time = state_transition_time + self.last_boot_time = last_boot_time + self.allocation_time = allocation_time + self.ip_address = ip_address + self.affinity_id = affinity_id + self.vm_size = vm_size + self.total_tasks_run = total_tasks_run + self.running_tasks_count = running_tasks_count + self.total_tasks_succeeded = total_tasks_succeeded + self.recent_tasks = recent_tasks + self.start_task = start_task + self.start_task_info = start_task_info + self.certificate_references = certificate_references + self.errors = errors + self.is_dedicated = is_dedicated + self.endpoint_configuration = endpoint_configuration + self.node_agent_info = node_agent_info diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options.py new file mode 100644 index 00000000..182c563e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options_py3.py new file mode 100644 index 00000000..97e8cb41 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reboot_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options.py new file mode 100644 index 00000000..8ec6e55f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright 
(c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options_py3.py new file mode 100644 index 00000000..dcff3ee8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_reimage_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options.py new file mode 100644 index 00000000..ed1f9548 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options_py3.py new file mode 100644 index 00000000..81e45b6c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_update_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options.py new file mode 100644 index 00000000..071b712e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options_py3.py new file mode 100644 index 00000000..bac1dad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_upload_batch_service_logs_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user.py new file mode 100644 index 00000000..495b246e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user account for RDP or SSH access on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the account. + :type name: str + :param is_admin: Whether the account should be an administrator on the + compute node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). 
+ :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_admin = kwargs.get('is_admin', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.password = kwargs.get('password', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user_py3.py new file mode 100644 index 00000000..a88a6ab2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/compute_node_user_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user account for RDP or SSH access on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the account. + :type name: str + :param is_admin: Whether the account should be an administrator on the + compute node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the account should expire. 
If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None: + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = name + self.is_admin = is_admin + self.expiry_time = expiry_time + self.password = password + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2018_08_01/models/container_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/container_configuration.py new file mode 100644 index 00000000..f4b932c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/container_configuration.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container image names. + This is the full image reference, as would be specified to "docker pull". + An image will be sourced from the default Docker registry unless the image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. 
+ :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, **kwargs): + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = kwargs.get('container_image_names', None) + self.container_registries = kwargs.get('container_registries', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/container_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/container_configuration_py3.py new file mode 100644 index 00000000..f65b047e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/container_configuration_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container image names. 
+ This is the full image reference, as would be specified to "docker pull". + An image will be sourced from the default Docker registry unless the image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. + :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None: + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = container_image_names + self.container_registries = container_registries diff --git a/azext/generated/sdk/batch/v2018_08_01/models/container_registry.py b/azext/generated/sdk/batch/v2018_08_01/models/container_registry.py new file mode 100644 index 00000000..18203196 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/container_registry.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = kwargs.get('registry_server', None) + self.user_name = kwargs.get('user_name', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/container_registry_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/container_registry_py3.py new file mode 100644 index 00000000..eb47f9e5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/container_registry_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None: + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = registry_server + self.user_name = user_name + self.password = password diff --git a/azext/generated/sdk/batch/v2018_08_01/models/data_disk.py b/azext/generated/sdk/batch/v2018_08_01/models/data_disk.py new file mode 100644 index 00000000..d2f80dad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/data_disk.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to compute nodes + in the pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. + :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage account type to be used for the + data disk. If omitted, the default is "standard_lrs". 
Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, **kwargs): + super(DataDisk, self).__init__(**kwargs) + self.lun = kwargs.get('lun', None) + self.caching = kwargs.get('caching', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.storage_account_type = kwargs.get('storage_account_type', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/data_disk_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/data_disk_py3.py new file mode 100644 index 00000000..2af0ce19 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/data_disk_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to compute nodes + in the pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. 
+ :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage account type to be used for the + data disk. If omitted, the default is "standard_lrs". Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: + super(DataDisk, self).__init__(**kwargs) + self.lun = lun + self.caching = caching + self.disk_size_gb = disk_size_gb + self.storage_account_type = storage_account_type diff --git a/azext/generated/sdk/batch/v2018_08_01/models/delete_certificate_error.py b/azext/generated/sdk/batch/v2018_08_01/models/delete_certificate_error.py new file mode 100644 index 00000000..dbb14c3c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/delete_certificate_error.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
class DeleteCertificateError(Model):
    """An error encountered by the Batch service when deleting a certificate.

    :param code: An identifier for the certificate deletion error. Codes are
     invariant and are intended to be consumed programmatically.
    :type code: str
    :param message: A message describing the certificate deletion error,
     intended to be suitable for display in a user interface.
    :type message: str
    :param values: A list of additional error details related to the
     certificate deletion error. This list includes details such as the active
     pools and nodes referencing this certificate. However, if a large number
     of resources reference the certificate, the list contains only about the
     first hundred.
    :type values: list[~azure.batch.models.NameValuePair]
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'values': {'key': 'values', 'type': '[NameValuePair]'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(DeleteCertificateError, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.values = kwargs.get('values', None)
class DeleteCertificateError(Model):
    """An error encountered by the Batch service when deleting a certificate.

    :param code: An identifier for the certificate deletion error. Codes are
     invariant and are intended to be consumed programmatically.
    :type code: str
    :param message: A message describing the certificate deletion error,
     intended to be suitable for display in a user interface.
    :type message: str
    :param values: A list of additional error details related to the
     certificate deletion error. This list includes details such as the active
     pools and nodes referencing this certificate. However, if a large number
     of resources reference the certificate, the list contains only about the
     first hundred.
    :type values: list[~azure.batch.models.NameValuePair]
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'values': {'key': 'values', 'type': '[NameValuePair]'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None:
        super(DeleteCertificateError, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.values = values
class EnvironmentSetting(Model):
    """An environment variable to be set on a task process.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the environment variable.
    :type name: str
    :param value: The value of the environment variable.
    :type value: str
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'name': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(EnvironmentSetting, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.value = kwargs.get('value', None)
class EnvironmentSetting(Model):
    """An environment variable to be set on a task process.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the environment variable.
    :type name: str
    :param value: The value of the environment variable.
    :type value: str
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'name': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, name: str, value: str=None, **kwargs) -> None:
        super(EnvironmentSetting, self).__init__(**kwargs)
        self.name = name
        self.value = value
class ErrorMessage(Model):
    """An error message received in an Azure Batch error response.

    :param lang: The language code of the error message.
    :type lang: str
    :param value: The text of the message.
    :type value: str
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'lang': {'key': 'lang', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(ErrorMessage, self).__init__(**kwargs)
        self.lang = kwargs.get('lang', None)
        self.value = kwargs.get('value', None)
class ErrorMessage(Model):
    """An error message received in an Azure Batch error response.

    :param lang: The language code of the error message.
    :type lang: str
    :param value: The text of the message.
    :type value: str
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'lang': {'key': 'lang', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None:
        super(ErrorMessage, self).__init__(**kwargs)
        self.lang = lang
        self.value = value
class ExitCodeMapping(Model):
    """How the Batch service should respond if a task exits with a particular exit
    code.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. A process exit code.
    :type code: int
    :param exit_options: Required. How the Batch service should respond if the
     task exits with this exit code.
    :type exit_options: ~azure.batch.models.ExitOptions
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'code': {'required': True},
        'exit_options': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'int'},
        'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(ExitCodeMapping, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.exit_options = kwargs.get('exit_options', None)
class ExitCodeMapping(Model):
    """How the Batch service should respond if a task exits with a particular exit
    code.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. A process exit code.
    :type code: int
    :param exit_options: Required. How the Batch service should respond if the
     task exits with this exit code.
    :type exit_options: ~azure.batch.models.ExitOptions
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'code': {'required': True},
        'exit_options': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'int'},
        'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, code: int, exit_options, **kwargs) -> None:
        super(ExitCodeMapping, self).__init__(**kwargs)
        self.code = code
        self.exit_options = exit_options
class ExitCodeRangeMapping(Model):
    """A range of exit codes and how the Batch service should respond to exit
    codes within that range.

    All required parameters must be populated in order to send to Azure.

    :param start: Required. The first exit code in the range.
    :type start: int
    :param end: Required. The last exit code in the range.
    :type end: int
    :param exit_options: Required. How the Batch service should respond if the
     task exits with an exit code in the range start to end (inclusive).
    :type exit_options: ~azure.batch.models.ExitOptions
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'start': {'required': True},
        'end': {'required': True},
        'exit_options': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'start': {'key': 'start', 'type': 'int'},
        'end': {'key': 'end', 'type': 'int'},
        'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(ExitCodeRangeMapping, self).__init__(**kwargs)
        self.start = kwargs.get('start', None)
        self.end = kwargs.get('end', None)
        self.exit_options = kwargs.get('exit_options', None)
class ExitCodeRangeMapping(Model):
    """A range of exit codes and how the Batch service should respond to exit
    codes within that range.

    All required parameters must be populated in order to send to Azure.

    :param start: Required. The first exit code in the range.
    :type start: int
    :param end: Required. The last exit code in the range.
    :type end: int
    :param exit_options: Required. How the Batch service should respond if the
     task exits with an exit code in the range start to end (inclusive).
    :type exit_options: ~azure.batch.models.ExitOptions
    """

    # Fields the docstring marks Required: must be populated before sending
    # the model to Azure.
    _validation = {
        'start': {'required': True},
        'end': {'required': True},
        'exit_options': {'required': True},
    }

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'start': {'key': 'start', 'type': 'int'},
        'end': {'key': 'end', 'type': 'int'},
        'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None:
        super(ExitCodeRangeMapping, self).__init__(**kwargs)
        self.start = start
        self.end = end
        self.exit_options = exit_options
class ExitConditions(Model):
    """Specifies how the Batch service should respond when the task completes.

    :param exit_codes: A list of individual task exit codes and how the Batch
     service should respond to them.
    :type exit_codes: list[~azure.batch.models.ExitCodeMapping]
    :param exit_code_ranges: A list of task exit code ranges and how the Batch
     service should respond to them.
    :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping]
    :param pre_processing_error: How the Batch service should respond if the
     task fails to start due to an error.
    :type pre_processing_error: ~azure.batch.models.ExitOptions
    :param file_upload_error: How the Batch service should respond if a file
     upload error occurs. If the task exited with an exit code that was
     specified via exitCodes or exitCodeRanges, and then encountered a file
     upload error, then the action specified by the exit code takes precedence.
    :type file_upload_error: ~azure.batch.models.ExitOptions
    :param default: How the Batch service should respond if the task fails
     with an exit condition not covered by any of the other properties. This
     value is used if the task exits with any nonzero exit code not listed in
     the exitCodes or exitCodeRanges collection, with a pre-processing error if
     the preProcessingError property is not present, or with a file upload
     error if the fileUploadError property is not present. If you want
     non-default behavior on exit code 0, you must list it explicitly using the
     exitCodes or exitCodeRanges collection.
    :type default: ~azure.batch.models.ExitOptions
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'},
        'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'},
        'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'},
        'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'},
        'default': {'key': 'default', 'type': 'ExitOptions'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(ExitConditions, self).__init__(**kwargs)
        self.exit_codes = kwargs.get('exit_codes', None)
        self.exit_code_ranges = kwargs.get('exit_code_ranges', None)
        self.pre_processing_error = kwargs.get('pre_processing_error', None)
        self.file_upload_error = kwargs.get('file_upload_error', None)
        self.default = kwargs.get('default', None)
class ExitConditions(Model):
    """Specifies how the Batch service should respond when the task completes.

    :param exit_codes: A list of individual task exit codes and how the Batch
     service should respond to them.
    :type exit_codes: list[~azure.batch.models.ExitCodeMapping]
    :param exit_code_ranges: A list of task exit code ranges and how the Batch
     service should respond to them.
    :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping]
    :param pre_processing_error: How the Batch service should respond if the
     task fails to start due to an error.
    :type pre_processing_error: ~azure.batch.models.ExitOptions
    :param file_upload_error: How the Batch service should respond if a file
     upload error occurs. If the task exited with an exit code that was
     specified via exitCodes or exitCodeRanges, and then encountered a file
     upload error, then the action specified by the exit code takes precedence.
    :type file_upload_error: ~azure.batch.models.ExitOptions
    :param default: How the Batch service should respond if the task fails
     with an exit condition not covered by any of the other properties. This
     value is used if the task exits with any nonzero exit code not listed in
     the exitCodes or exitCodeRanges collection, with a pre-processing error if
     the preProcessingError property is not present, or with a file upload
     error if the fileUploadError property is not present. If you want
     non-default behavior on exit code 0, you must list it explicitly using the
     exitCodes or exitCodeRanges collection.
    :type default: ~azure.batch.models.ExitOptions
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'},
        'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'},
        'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'},
        'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'},
        'default': {'key': 'default', 'type': 'ExitOptions'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None:
        super(ExitConditions, self).__init__(**kwargs)
        self.exit_codes = exit_codes
        self.exit_code_ranges = exit_code_ranges
        self.pre_processing_error = pre_processing_error
        self.file_upload_error = file_upload_error
        self.default = default
class ExitOptions(Model):
    """Specifies how the Batch service responds to a particular exit condition.

    :param job_action: An action to take on the job containing the task, if
     the task completes with the given exit condition and the job's
     onTaskFailed property is 'performExitOptionsJobAction'. The default is
     none for exit code 0 and terminate for all other exit conditions. If the
     job's onTaskFailed property is noAction, then specifying this property
     returns an error and the add task request fails with an invalid property
     value error; if you are calling the REST API directly, the HTTP status
     code is 400 (Bad Request). Possible values include: 'none', 'disable',
     'terminate'
    :type job_action: str or ~azure.batch.models.JobAction
    :param dependency_action: An action that the Batch service performs on
     tasks that depend on this task. The default is 'satisfy' for exit code 0,
     and 'block' for all other exit conditions. If the job's
     usesTaskDependencies property is set to false, then specifying the
     dependencyAction property returns an error and the add task request fails
     with an invalid property value error; if you are calling the REST API
     directly, the HTTP status code is 400 (Bad Request). Possible values
     include: 'satisfy', 'block'
    :type dependency_action: str or ~azure.batch.models.DependencyAction
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'job_action': {'key': 'jobAction', 'type': 'JobAction'},
        'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    def __init__(self, **kwargs):
        super(ExitOptions, self).__init__(**kwargs)
        self.job_action = kwargs.get('job_action', None)
        self.dependency_action = kwargs.get('dependency_action', None)
class ExitOptions(Model):
    """Specifies how the Batch service responds to a particular exit condition.

    :param job_action: An action to take on the job containing the task, if
     the task completes with the given exit condition and the job's
     onTaskFailed property is 'performExitOptionsJobAction'. The default is
     none for exit code 0 and terminate for all other exit conditions. If the
     job's onTaskFailed property is noAction, then specifying this property
     returns an error and the add task request fails with an invalid property
     value error; if you are calling the REST API directly, the HTTP status
     code is 400 (Bad Request). Possible values include: 'none', 'disable',
     'terminate'
    :type job_action: str or ~azure.batch.models.JobAction
    :param dependency_action: An action that the Batch service performs on
     tasks that depend on this task. The default is 'satisfy' for exit code 0,
     and 'block' for all other exit conditions. If the job's
     usesTaskDependencies property is set to false, then specifying the
     dependencyAction property returns an error and the add task request fails
     with an invalid property value error; if you are calling the REST API
     directly, the HTTP status code is 400 (Bad Request). Possible values
     include: 'satisfy', 'block'
    :type dependency_action: str or ~azure.batch.models.DependencyAction
    """

    # Attribute name -> REST wire key ('key') and msrest serialization type.
    _attribute_map = {
        'job_action': {'key': 'jobAction', 'type': 'JobAction'},
        'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None:
        super(ExitOptions, self).__init__(**kwargs)
        self.job_action = job_action
        self.dependency_action = dependency_action
class FileDeleteFromComputeNodeOptions(Model):
    """Additional parameters for delete_from_compute_node operation.

    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly if
     you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # NOTE(review): 'key' values are empty — these options are not body
    # fields; presumably the generated operation layer sends them as request
    # headers/query parameters. Confirm against the operations code.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    # Python-2-compatible constructor: values arrive via **kwargs.
    # Note the non-None defaults: timeout=30 and return_client_request_id=False.
    def __init__(self, **kwargs):
        super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs)
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id', None)
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date', None)
class FileDeleteFromComputeNodeOptions(Model):
    """Additional parameters for delete_from_compute_node operation.

    :param timeout: The maximum time that the server can spend processing the
     request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly if
     you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # NOTE(review): 'key' values are empty — these options are not body
    # fields; presumably the generated operation layer sends them as request
    # headers/query parameters. Confirm against the operations code.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    # Keyword-only constructor: Python-3 variant of the generated model.
    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_delete_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_delete_from_task_options_py3.py new file mode 100644 index 00000000..7d783006 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_delete_from_task_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options.py new file mode 100644 index 00000000..9a6e3fb7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options_py3.py new file mode 100644 index 00000000..ab3dc34f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_compute_node_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options.py new file mode 100644 index 00000000..19bd5cde --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options_py3.py new file mode 100644 index 00000000..30ec6583 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_from_task_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options.py new file mode 100644 index 00000000..bf283d1d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options_py3.py new file mode 100644 index 00000000..69a90184 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_compute_node_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options.py new file mode 100644 index 00000000..836387d3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options_py3.py new file mode 100644 index 00000000..73996895 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_get_properties_from_task_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options.py new file mode 100644 index 00000000..dc32df46 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options_py3.py new file mode 100644 index 00000000..e475dcde --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_compute_node_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options.py b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options.py new file mode 100644 index 00000000..86728b25 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options_py3.py new file mode 100644 index 00000000..354c4869 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_list_from_task_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_properties.py b/azext/generated/sdk/batch/v2018_08_01/models/file_properties.py new file mode 100644 index 00000000..3cef0000 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_properties.py @@ -0,0 
+1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux compute nodes. + :type creation_time: datetime + :param last_modified: Required. The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux compute nodes. 
+ :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileProperties, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/file_properties_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/file_properties_py3.py new file mode 100644 index 00000000..71c2a8e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/file_properties_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux compute nodes. + :type creation_time: datetime + :param last_modified: Required. 
The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux compute nodes. + :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None: + super(FileProperties, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.content_length = content_length + self.content_type = content_type + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2018_08_01/models/image_reference.py b/azext/generated/sdk/batch/v2018_08_01/models/image_reference.py new file mode 100644 index 00000000..41fb7fbc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/image_reference.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace image or a custom + Azure Virtual Machine image. To get the list of all Azure Marketplace image + references verified by Azure Batch, see the 'List node agent SKUs' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + image. A value of 'latest' can be specified to select the latest version + of an image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + virtual machine image. Computes nodes of the pool will be created using + this custom image. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The virtual machine image must be in the same region and subscription as + the Azure Batch account. For information about the firewall settings for + the Batch node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
+ :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ImageReference, self).__init__(**kwargs) + self.publisher = kwargs.get('publisher', None) + self.offer = kwargs.get('offer', None) + self.sku = kwargs.get('sku', None) + self.version = kwargs.get('version', None) + self.virtual_machine_image_id = kwargs.get('virtual_machine_image_id', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/image_reference_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/image_reference_py3.py new file mode 100644 index 00000000..7471294d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/image_reference_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace image or a custom + Azure Virtual Machine image. To get the list of all Azure Marketplace image + references verified by Azure Batch, see the 'List node agent SKUs' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + image. For example, Canonical or MicrosoftWindowsServer. 
+ :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + image. A value of 'latest' can be specified to select the latest version + of an image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + virtual machine image. Computes nodes of the pool will be created using + this custom image. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The virtual machine image must be in the same region and subscription as + the Azure Batch account. For information about the firewall settings for + the Batch node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
+ :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None: + super(ImageReference, self).__init__(**kwargs) + self.publisher = publisher + self.offer = offer + self.sku = sku + self.version = version + self.virtual_machine_image_id = virtual_machine_image_id diff --git a/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint.py b/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint.py new file mode 100644 index 00000000..8fd064a9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the compute + node. 
+ :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the compute node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. + :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(InboundEndpoint, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.public_ip_address = kwargs.get('public_ip_address', None) + self.public_fqdn = kwargs.get('public_fqdn', None) + self.frontend_port = kwargs.get('frontend_port', None) + self.backend_port = kwargs.get('backend_port', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint_py3.py new file mode 100644 index 00000000..004e1577 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/inbound_endpoint_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the compute + node. + :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the compute node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. 
+ :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None: + super(InboundEndpoint, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.public_ip_address = public_ip_address + self.public_fqdn = public_fqdn + self.frontend_port = frontend_port + self.backend_port = backend_port diff --git a/azext/generated/sdk/batch/v2018_08_01/models/inbound_nat_pool.py b/azext/generated/sdk/batch/v2018_08_01/models/inbound_nat_pool.py new file mode 100644 index 00000000..bf3209e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/inbound_nat_pool.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT pool that can be used to address specific ports on compute + nodes in a Batch pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the compute node. This + must be unique within a Batch pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. 
Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. + :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, **kwargs): + super(InboundNATPool, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.backend_port = kwargs.get('backend_port', None) + self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) + self.frontend_port_range_end = kwargs.get('frontend_port_range_end', 
class InboundNATPool(Model):
    """An inbound NAT pool that exposes specific ports on the compute nodes
    of a Batch pool to external traffic.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The endpoint name. Must be unique within the
     pool; may contain letters, numbers, underscores, periods and hyphens,
     must start with a letter or number, end with a letter, number or
     underscore, and cannot exceed 77 characters. Invalid values make the
     request fail with HTTP status code 400.
    :type name: str
    :param protocol: Required. The endpoint protocol. Possible values
     include: 'tcp', 'udp'
    :type protocol: str or ~azure.batch.models.InboundEndpointProtocol
    :param backend_port: Required. The port number on the compute node.
     Must be unique within the pool. Acceptable values are 1-65535, except
     the reserved ports 22, 3389, 29876 and 29877; reserved values make
     the request fail with HTTP status code 400.
    :type backend_port: int
    :param frontend_port_range_start: Required. The first port in the range
     of external ports used to provide inbound access to the backendPort on
     individual compute nodes. Acceptable values are 1-65534 excluding the
     reserved range 50000-55000. All ranges within a pool must be distinct,
     must not overlap, and must each contain at least 40 ports; reserved or
     overlapping values make the request fail with HTTP status code 400.
    :type frontend_port_range_start: int
    :param frontend_port_range_end: Required. The last port in the range of
     external ports used to provide inbound access to the backendPort on
     individual compute nodes. Same constraints as
     frontendPortRangeStart.
    :type frontend_port_range_end: int
    :param network_security_group_rules: Network security group rules
     applied to the endpoint. At most 25 rules may be specified across all
     endpoints on a pool; exceeding that makes the request fail with HTTP
     status code 400. When omitted, a default rule allowing inbound access
     to the specified backendPort is created.
    :type network_security_group_rules:
     list[~azure.batch.models.NetworkSecurityGroupRule]
    """

    _validation = {
        'name': {'required': True},
        'protocol': {'required': True},
        'backend_port': {'required': True},
        'frontend_port_range_start': {'required': True},
        'frontend_port_range_end': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'},
        'backend_port': {'key': 'backendPort', 'type': 'int'},
        'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'},
        'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'},
        'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'},
    }

    def __init__(self, *, name: str, protocol, backend_port: int,
                 frontend_port_range_start: int, frontend_port_range_end: int,
                 network_security_group_rules=None, **kwargs) -> None:
        super(InboundNATPool, self).__init__(**kwargs)
        self.name = name
        self.protocol = protocol
        self.backend_port = backend_port
        self.frontend_port_range_start = frontend_port_range_start
        self.frontend_port_range_end = frontend_port_range_end
        self.network_security_group_rules = network_security_group_rules
class JobAddOptions(Model):
    """Additional parameters for add operation.

    :param timeout: The maximum time that the server can spend processing
     the request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # These are request-header/query parameters, not body fields, hence the
    # empty serialization keys.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobAddOptions, self).__init__(**kwargs)
        # Service-side defaults: 30-second processing limit, no echo of the
        # client request id.
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')
class JobAddOptions(Model):
    """Additional parameters for add operation.

    :param timeout: The maximum time that the server can spend processing
     the request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # These are request-header/query parameters, not body fields, hence the
    # empty serialization keys.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int = 30, client_request_id: str = None,
                 return_client_request_id: bool = False, ocp_date=None,
                 **kwargs) -> None:
        super(JobAddOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
class JobAddParameter(Model):
    """An Azure Batch job to add.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A string that uniquely identifies the job within
     the account. Up to 64 alphanumeric characters including hyphens and
     underscores; the ID is case-preserving and case-insensitive (two IDs
     in an account may not differ only by case).
    :type id: str
    :param display_name: The display name for the job. Need not be unique;
     any Unicode characters up to a maximum length of 1024.
    :type display_name: str
    :param priority: The priority of the job, ranging from -1000 (lowest)
     to 1000 (highest). The default value is 0.
    :type priority: int
    :param constraints: The execution constraints for the job.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: Details of a Job Manager task to be launched
     when the job is started. If specified, the Batch service creates the
     Job Manager task with the job and tries to schedule it before other
     tasks in the job; if not specified, the user must explicitly add tasks
     to the job. A Job Manager task typically controls and/or monitors job
     execution, but it is a fully-fledged task and may perform any actions
     required for the job.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: The Job Preparation task. If specified,
     the Batch service runs it on a compute node before starting any tasks
     of the job on that node.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: The Job Release task, run on each compute node
     that ran the Job Preparation task; it cannot be specified without a
     Job Preparation task. Its primary purpose is to undo changes made by
     the Job Preparation task (e.g. deleting local files, stopping
     services).
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: Environment variables set for all
     tasks in the job (including Job Manager, Job Preparation and Job
     Release tasks). Individual tasks can override a setting by specifying
     the same setting name with a different value.
    :type common_environment_settings:
     list[~azure.batch.models.EnvironmentSetting]
    :param pool_info: Required. The pool on which the Batch service runs
     the job's tasks.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param on_all_tasks_complete: The action the Batch service should take
     when all tasks in the job are in the completed state (a job with no
     tasks counts as complete). To use automatic termination without a Job
     Manager, set this to noaction initially and update it to terminatejob
     once all tasks are added. The default is noaction. Possible values
     include: 'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or
     ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: The action the Batch service should take when
     any task in the job fails, i.e. completes with a failureInfo (non-zero
     exit code after exhausting retries, or an error starting the task such
     as a resource file download error). The default is noaction. Possible
     values include: 'noAction', 'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param metadata: Name-value pairs associated with the job as metadata;
     the Batch service assigns them no meaning.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param uses_task_dependencies: Whether tasks in the job can define
     dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    """

    _validation = {
        'id': {'required': True},
        'pool_info': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(JobAddParameter, self).__init__(**kwargs)
        self.id = kwargs.get('id')
        self.display_name = kwargs.get('display_name')
        self.priority = kwargs.get('priority')
        self.constraints = kwargs.get('constraints')
        self.job_manager_task = kwargs.get('job_manager_task')
        self.job_preparation_task = kwargs.get('job_preparation_task')
        self.job_release_task = kwargs.get('job_release_task')
        self.common_environment_settings = kwargs.get('common_environment_settings')
        self.pool_info = kwargs.get('pool_info')
        self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete')
        self.on_task_failure = kwargs.get('on_task_failure')
        self.metadata = kwargs.get('metadata')
        self.uses_task_dependencies = kwargs.get('uses_task_dependencies')
class JobAddParameter(Model):
    """An Azure Batch job to add.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A string that uniquely identifies the job within
     the account. Up to 64 alphanumeric characters including hyphens and
     underscores; the ID is case-preserving and case-insensitive (two IDs
     in an account may not differ only by case).
    :type id: str
    :param pool_info: Required. The pool on which the Batch service runs
     the job's tasks.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param display_name: The display name for the job. Need not be unique;
     any Unicode characters up to a maximum length of 1024.
    :type display_name: str
    :param priority: The priority of the job, ranging from -1000 (lowest)
     to 1000 (highest). The default value is 0.
    :type priority: int
    :param constraints: The execution constraints for the job.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: Details of a Job Manager task to be launched
     when the job is started. If specified, the Batch service creates the
     Job Manager task with the job and tries to schedule it before other
     tasks in the job; if not specified, the user must explicitly add tasks
     to the job. A Job Manager task typically controls and/or monitors job
     execution, but it is a fully-fledged task and may perform any actions
     required for the job.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: The Job Preparation task. If specified,
     the Batch service runs it on a compute node before starting any tasks
     of the job on that node.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: The Job Release task, run on each compute node
     that ran the Job Preparation task; it cannot be specified without a
     Job Preparation task. Its primary purpose is to undo changes made by
     the Job Preparation task (e.g. deleting local files, stopping
     services).
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: Environment variables set for all
     tasks in the job (including Job Manager, Job Preparation and Job
     Release tasks). Individual tasks can override a setting by specifying
     the same setting name with a different value.
    :type common_environment_settings:
     list[~azure.batch.models.EnvironmentSetting]
    :param on_all_tasks_complete: The action the Batch service should take
     when all tasks in the job are in the completed state (a job with no
     tasks counts as complete). To use automatic termination without a Job
     Manager, set this to noaction initially and update it to terminatejob
     once all tasks are added. The default is noaction. Possible values
     include: 'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or
     ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: The action the Batch service should take when
     any task in the job fails, i.e. completes with a failureInfo (non-zero
     exit code after exhausting retries, or an error starting the task such
     as a resource file download error). The default is noaction. Possible
     values include: 'noAction', 'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param metadata: Name-value pairs associated with the job as metadata;
     the Batch service assigns them no meaning.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param uses_task_dependencies: Whether tasks in the job can define
     dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    """

    _validation = {
        'id': {'required': True},
        'pool_info': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
    }

    def __init__(self, *, id: str, pool_info, display_name: str = None,
                 priority: int = None, constraints=None, job_manager_task=None,
                 job_preparation_task=None, job_release_task=None,
                 common_environment_settings=None, on_all_tasks_complete=None,
                 on_task_failure=None, metadata=None,
                 uses_task_dependencies: bool = None, **kwargs) -> None:
        super(JobAddParameter, self).__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.priority = priority
        self.constraints = constraints
        self.job_manager_task = job_manager_task
        self.job_preparation_task = job_preparation_task
        self.job_release_task = job_release_task
        self.common_environment_settings = common_environment_settings
        self.pool_info = pool_info
        self.on_all_tasks_complete = on_all_tasks_complete
        self.on_task_failure = on_task_failure
        self.metadata = metadata
        self.uses_task_dependencies = uses_task_dependencies
class JobConstraints(Model):
    """The execution constraints for a job.

    :param max_wall_clock_time: The maximum elapsed time that the job may
     run, measured from the time the job is created. If the job does not
     complete within this limit, the Batch service terminates it and any
     still-running tasks, with termination reason MaxWallClockTimeExpiry.
     If not specified, there is no time limit on how long the job may run.
    :type max_wall_clock_time: timedelta
    :param max_task_retry_count: The maximum number of times each task may
     be retried (the Batch service retries a task if its exit code is
     nonzero). This controls retries only: with a retry count of 3 a task
     is tried up to 4 times (one initial try plus 3 retries). 0 means no
     retries; -1 means retry without limit. The default value is 0 (no
     retries).
    :type max_task_retry_count: int
    """

    _attribute_map = {
        'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
        'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(JobConstraints, self).__init__(**kwargs)
        self.max_wall_clock_time = kwargs.get('max_wall_clock_time')
        self.max_task_retry_count = kwargs.get('max_task_retry_count')
class JobConstraints(Model):
    """The execution constraints for a job.

    :param max_wall_clock_time: The maximum elapsed time that the job may
     run, measured from the time the job is created. If the job does not
     complete within this limit, the Batch service terminates it and any
     still-running tasks, with termination reason MaxWallClockTimeExpiry.
     If not specified, there is no time limit on how long the job may run.
    :type max_wall_clock_time: timedelta
    :param max_task_retry_count: The maximum number of times each task may
     be retried (the Batch service retries a task if its exit code is
     nonzero). This controls retries only: with a retry count of 3 a task
     is tried up to 4 times (one initial try plus 3 retries). 0 means no
     retries; -1 means retry without limit. The default value is 0 (no
     retries).
    :type max_task_retry_count: int
    """

    _attribute_map = {
        'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
        'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
    }

    def __init__(self, *, max_wall_clock_time=None,
                 max_task_retry_count: int = None, **kwargs) -> None:
        super(JobConstraints, self).__init__(**kwargs)
        self.max_wall_clock_time = max_wall_clock_time
        self.max_task_retry_count = max_task_retry_count
class JobDeleteOptions(Model):
    """Additional parameters for delete operation.

    :param timeout: The maximum time that the server can spend processing
     the request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: An ETag value associated with the version of the
     resource known to the client. The operation is performed only if the
     resource's current ETag on the service exactly matches this value.
    :type if_match: str
    :param if_none_match: An ETag value associated with the version of the
     resource known to the client. The operation is performed only if the
     resource's current ETag on the service does not match this value.
    :type if_none_match: str
    :param if_modified_since: A timestamp of the last modified time of the
     resource known to the client. The operation is performed only if the
     resource on the service has been modified since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: A timestamp of the last modified time of
     the resource known to the client. The operation is performed only if
     the resource on the service has not been modified since this time.
    :type if_unmodified_since: datetime
    """

    # These are request-header/query parameters, not body fields, hence the
    # empty serialization keys.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobDeleteOptions, self).__init__(**kwargs)
        # Service-side defaults: 30-second processing limit, no echo of the
        # client request id; conditional headers default to unset.
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')
        self.if_match = kwargs.get('if_match')
        self.if_none_match = kwargs.get('if_none_match')
        self.if_modified_since = kwargs.get('if_modified_since')
        self.if_unmodified_since = kwargs.get('if_unmodified_since')
class JobDeleteOptions(Model):
    """Additional parameters for delete operation.

    :param timeout: The maximum time that the server can spend processing
     the request, in seconds. The default is 30 seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, in the
     form of a GUID with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: An ETag value associated with the version of the
     resource known to the client. The operation is performed only if the
     resource's current ETag on the service exactly matches this value.
    :type if_match: str
    :param if_none_match: An ETag value associated with the version of the
     resource known to the client. The operation is performed only if the
     resource's current ETag on the service does not match this value.
    :type if_none_match: str
    :param if_modified_since: A timestamp of the last modified time of the
     resource known to the client. The operation is performed only if the
     resource on the service has been modified since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: A timestamp of the last modified time of
     the resource known to the client. The operation is performed only if
     the resource on the service has not been modified since this time.
    :type if_unmodified_since: datetime
    """

    # These are request-header/query parameters, not body fields, hence the
    # empty serialization keys.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int = 30, client_request_id: str = None,
                 return_client_request_id: bool = False, ocp_date=None,
                 if_match: str = None, if_none_match: str = None,
                 if_modified_since=None, if_unmodified_since=None,
                 **kwargs) -> None:
        super(JobDeleteOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_disable_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_options_py3.py new file mode 100644 index 00000000..4b077714 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter.py new file mode 100644 index 00000000..d86c965f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a job. 
+ + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active tasks associated + with the job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, **kwargs): + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = kwargs.get('disable_tasks', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter_py3.py new file mode 100644 index 00000000..fd99f78e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_disable_parameter_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a job. + + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active tasks associated + with the job. 
Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, *, disable_tasks, **kwargs) -> None: + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = disable_tasks diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options.py new file mode 100644 index 00000000..182f2b04 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options_py3.py new file mode 100644 index 00000000..47695f37 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information.py new file mode 100644 index 00000000..28f5a31d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the job. This is the time + at which the job was created. + :type start_time: datetime + :param end_time: The completion time of the job. This property is set only + if the job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the pool to which this job is assigned. This + element contains the actual pool where the job is assigned. When you get + job details from the service, they also contain a poolInfo element, which + contains the pool configuration data from when the job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the job ran on an + auto pool, and this property contains the ID of that auto pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the job. This property is not set if there was no error + starting the job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the job ended. + This property is set only if the job is in the completed state. If the + Batch service terminates the job, it sets the reason as follows: + JMComplete - the Job Manager task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the job reached its maxWallClockTime + constraint. TerminateJobSchedule - the job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the job's onAllTasksComplete + attribute is set to terminatejob, and all tasks in the job are complete. 
+ TaskFailed - the job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a task in the job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.pool_id = kwargs.get('pool_id', None) + self.scheduling_error = kwargs.get('scheduling_error', None) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information_py3.py new file mode 100644 index 00000000..436d2990 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_execution_information_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the job. This is the time + at which the job was created. + :type start_time: datetime + :param end_time: The completion time of the job. This property is set only + if the job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the pool to which this job is assigned. This + element contains the actual pool where the job is assigned. When you get + job details from the service, they also contain a poolInfo element, which + contains the pool configuration data from when the job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the job ran on an + auto pool, and this property contains the ID of that auto pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the job. This property is not set if there was no error + starting the job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the job ended. + This property is set only if the job is in the completed state. If the + Batch service terminates the job, it sets the reason as follows: + JMComplete - the Job Manager task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the job reached its maxWallClockTime + constraint. TerminateJobSchedule - the job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the job's onAllTasksComplete + attribute is set to terminatejob, and all tasks in the job are complete. 
+ TaskFailed - the job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a task in the job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, start_time, end_time=None, pool_id: str=None, scheduling_error=None, terminate_reason: str=None, **kwargs) -> None: + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.pool_id = pool_id + self.scheduling_error = scheduling_error + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..a8f7e849 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..2092bbd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_options.py new file mode 100644 index 00000000..62d47959 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_options_py3.py new file mode 100644 index 00000000..9ed21fc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options.py new file mode 100644 index 00000000..603d79ce --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options_py3.py new file mode 100644 index 00000000..b109e59e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_get_task_counts_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options.py new file mode 100644 index 00000000..7f95aaf7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options_py3.py new file mode 100644 index 00000000..eb606478 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_from_job_schedule_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_options.py new file mode 100644 index 00000000..b9d34191 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_options_py3.py new file mode 100644 index 00000000..f7787cd7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options.py new file mode 100644 index 00000000..443ebba5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options_py3.py new file mode 100644 index 00000000..a7353629 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_list_preparation_and_release_task_status_options_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task.py b/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task.py new file mode 100644 index 00000000..38d54ac0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager task. + + The Job Manager task is automatically started when the job is created. The + Batch service tries to schedule the Job Manager task before any other tasks + in the job. 
When shrinking a pool, the Batch service tries to preserve + compute nodes where Job Manager tasks are running for as long as possible + (that is, nodes running 'normal' tasks are removed before nodes running Job + Manager tasks). When a Job Manager task fails and needs to be restarted, + the system tries to schedule it at the highest priority. If there are no + idle nodes available, the system may terminate one of the running tasks in + the pool and return it to the queue in order to make room for the Job + Manager task to restart. Note that a Job Manager task in one job does not + have priority over tasks in other jobs. Across jobs, only job level + priorities are observed. For example, if a Job Manager in a priority 0 job + needs to be restarted, it will not displace tasks of a priority 1 job. + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + task within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager task. 
It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager task runs. If the pool that will run this task has + containerConfiguration set, this must be set as well. If the pool that + will run this task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all task environment variables are mapped + into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. 
This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager task + signifies completion of the entire job. If true, when the Job Manager task + completes, the Batch service marks the job as complete. If any tasks are + still running at this time (other than Job Release), those tasks are + terminated. If false, the completion of the Job Manager task does not + affect the job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the job, or have a client or + user terminate the job explicitly. An example of this is if the Job + Manager creates a set of tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control job lifetime, + and using the Job Manager task only to create the tasks for the job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager task requires exclusive use + of the compute node where it runs. If true, no other tasks will run on the + same compute node for as long as the Job Manager is running. If false, + other tasks can run simultaneously with the Job Manager on a compute node. + The Job Manager task counts normally against the node's concurrent task + limit, so this is only relevant if the node allows multiple concurrent + tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager task may run on a + low-priority compute node. The default value is true. + :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobManagerTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.kill_job_on_completion = 
kwargs.get('kill_job_on_completion', None) + self.user_identity = kwargs.get('user_identity', None) + self.run_exclusive = kwargs.get('run_exclusive', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) + self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task_py3.py new file mode 100644 index 00000000..668b182b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_manager_task_py3.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager task. + + The Job Manager task is automatically started when the job is created. The + Batch service tries to schedule the Job Manager task before any other tasks + in the job. When shrinking a pool, the Batch service tries to preserve + compute nodes where Job Manager tasks are running for as long as possible + (that is, nodes running 'normal' tasks are removed before nodes running Job + Manager tasks). When a Job Manager task fails and needs to be restarted, + the system tries to schedule it at the highest priority. 
If there are no + idle nodes available, the system may terminate one of the running tasks in + the pool and return it to the queue in order to make room for the Job + Manager task to restart. Note that a Job Manager task in one job does not + have priority over tasks in other jobs. Across jobs, only job level + priorities are observed. For example, if a Job Manager in a priority 0 job + needs to be restarted, it will not displace tasks of a priority 1 job. + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + task within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager task. It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. 
If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager task runs. If the pool that will run this task has + containerConfiguration set, this must be set as well. If the pool that + will run this task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all task environment variables are mapped + into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. 
+ :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager task + signifies completion of the entire job. If true, when the Job Manager task + completes, the Batch service marks the job as complete. If any tasks are + still running at this time (other than Job Release), those tasks are + terminated. If false, the completion of the Job Manager task does not + affect the job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the job, or have a client or + user terminate the job explicitly. An example of this is if the Job + Manager creates a set of tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control job lifetime, + and using the Job Manager task only to create the tasks for the job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager task + runs. If omitted, the task runs as a non-administrative user unique to the + task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager task requires exclusive use + of the compute node where it runs. If true, no other tasks will run on the + same compute node for as long as the Job Manager is running. If false, + other tasks can run simultaneously with the Job Manager on a compute node. 
+ The Job Manager task counts normally against the node's concurrent task + limit, so this is only relevant if the node allows multiple concurrent + tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager task may run on a + low-priority compute node. The default value is true. 
+ :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None: + super(JobManagerTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.constraints = constraints + self.kill_job_on_completion = 
kill_job_on_completion + self.user_identity = user_identity + self.run_exclusive = run_exclusive + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings + self.allow_low_priority_node = allow_low_priority_node diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options.py new file mode 100644 index 00000000..9fdbb4f3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options_py3.py new file mode 100644 index 00000000..586e381d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter.py new file mode 100644 index 00000000..2088a145 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a job. 
+ + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the job is left unchanged. + :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The pool on which the Batch service runs the job's + tasks. You may change the pool for a job only when the job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto pool has a poolLifetimeOption of job. If omitted, the job + continues to run on its current pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, the existing job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter_py3.py new file mode 100644 index 00000000..af6da2aa --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_patch_parameter_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a job. + + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the job is left unchanged. 
+ :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The pool on which the Batch service runs the job's + tasks. You may change the pool for a job only when the job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto pool has a poolLifetimeOption of job. If omitted, the job + continues to run on its current pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, the existing job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, priority: int=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None: + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = priority + self.on_all_tasks_complete = on_all_tasks_complete + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information.py new file mode 100644 index 00000000..44356460 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release tasks on a compute node. + + :param pool_id: The ID of the pool containing the compute node to which + this entry refers. 
+ :type pool_id: str + :param node_id: The ID of the compute node to which this entry refers. + :type node_id: str + :param node_url: The URL of the compute node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation task on this compute node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release task on this compute node. This property is set + only if the Job Release task has run on the node. + :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.node_url = kwargs.get('node_url', None) + self.job_preparation_task_execution_info = kwargs.get('job_preparation_task_execution_info', None) + self.job_release_task_execution_info = kwargs.get('job_release_task_execution_info', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_paged.py new file mode 100644 index 00000000..f1f7d3c4 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class JobPreparationAndReleaseTaskExecutionInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobPreparationAndReleaseTaskExecutionInformation <azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobPreparationAndReleaseTaskExecutionInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(JobPreparationAndReleaseTaskExecutionInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_py3.py new file mode 100644 index 00000000..35520702 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_and_release_task_execution_information_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release tasks on a compute node. + + :param pool_id: The ID of the pool containing the compute node to which + this entry refers. + :type pool_id: str + :param node_id: The ID of the compute node to which this entry refers. + :type node_id: str + :param node_url: The URL of the compute node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation task on this compute node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release task on this compute node. This property is set + only if the Job Release task has run on the node. 
+ :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, *, pool_id: str=None, node_id: str=None, node_url: str=None, job_preparation_task_execution_info=None, job_release_task_execution_info=None, **kwargs) -> None: + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.node_id = node_id + self.node_url = node_url + self.job_preparation_task_execution_info = job_preparation_task_execution_info + self.job_release_task_execution_info = job_release_task_execution_info diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task.py new file mode 100644 index 00000000..acf8da03 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation task to run before any tasks of the job on any given + compute node. 
+ + You can use Job Preparation to prepare a compute node to run tasks for the + job. Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the tasks in the job. The Job Preparation + task can download these common resource files to the shared location on the + compute node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service + on the compute node so that all tasks of that job can communicate with it. + If the Job Preparation task fails (that is, exhausts its retry count before + exiting with exit code 0), Batch will not run tasks of this job on the + compute node. The node remains ineligible to run tasks of this job until it + is reimaged. The node remains active and can be used for other jobs. The + Job Preparation task can run multiple times on the same compute node. + Therefore, you should write the Job Preparation task to handle + re-execution. If the compute node is rebooted, the Job Preparation task is + run again on the node before scheduling any other task of the job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did + not previously complete. If the compute node is reimaged, the Job + Preparation task is run again before scheduling any task of the job. Batch + will retry tasks when a recovery operation is triggered on a compute node. + Examples of recovery operations include (but are not limited to) when an + unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. 
+ + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation task + within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other task in the job can + have the same ID as the Job Preparation task. If you try to submit a task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all task + environment variables are mapped into the container, and the task command + line is executed in the container. 
+ :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation task to complete successfully before scheduling any other + tasks of the job on the compute node. A Job Preparation task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + task fails on a compute node, the Batch service retries the Job + Preparation task up to its maximum retry count (as specified in the + constraints element). If the task has still not completed successfully + after all retries, then the Batch service will not schedule tasks of the + job to the compute node. The compute node remains active and eligible to + run tasks of other jobs. If false, the Batch service will not wait for the + Job Preparation task to complete. In this case, other tasks of the job can + start executing on the compute node while the Job Preparation task is + still running; and even if the Job Preparation task fails, new tasks will + continue to be scheduled on the node. 
The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + task runs. If omitted, the task runs as a non-administrative user unique + to the task on Windows nodes, or a non-administrative user unique to the + pool on Linux nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation task after a compute node reboots. The + Job Preparation task is always rerun if a compute node is reimaged, or if + the Job Preparation task did not complete (e.g. because the reboot + occurred while the task was running). Therefore, you should always write a + Job Preparation task to be idempotent and to behave correctly if run + multiple times. The default value is true. + :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', 
None) + self.constraints = kwargs.get('constraints', None) + self.wait_for_success = kwargs.get('wait_for_success', None) + self.user_identity = kwargs.get('user_identity', None) + self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information.py new file mode 100644 index 00000000..f51b95a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation task on a + compute node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Preparation task completed. + This property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation task on + the compute node. 
Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation task + on the compute node. You can use this path to retrieve files created by + the task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. 
Task application failures + (non-zero exit code) are retried, pre-processing errors (the task could + not be run) and file upload errors are not retried. The Batch service will + retry the task up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation task started running. This property is set only if the task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the task has been restarted + for reasons other than retry; for example, if the compute node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTaskExecutionInformation, 
self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information_py3.py new file mode 100644 index 00000000..36bd33f7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_execution_information_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation task on a + compute node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. 
+ :type start_time: datetime + :param end_time: The time at which the Job Preparation task completed. + This property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation task on + the compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation task + on the compute node. You can use this path to retrieve files created by + the task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. 
Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. Task application failures + (non-zero exit code) are retried, pre-processing errors (the task could + not be run) and file upload errors are not retried. The Batch service will + retry the task up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation task started running. This property is set only if the task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the task has been restarted + for reasons other than retry; for example, if the compute node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, retry_count: int, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_py3.py new file mode 100644 index 00000000..93e99d37 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_08_01/models/job_preparation_task_py3.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation task to run before any tasks of the job on any given + compute node. + + You can use Job Preparation to prepare a compute node to run tasks for the + job. Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the tasks in the job. The Job Preparation + task can download these common resource files to the shared location on the + compute node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service + on the compute node so that all tasks of that job can communicate with it. + If the Job Preparation task fails (that is, exhausts its retry count before + exiting with exit code 0), Batch will not run tasks of this job on the + compute node. The node remains ineligible to run tasks of this job until it + is reimaged. The node remains active and can be used for other jobs. The + Job Preparation task can run multiple times on the same compute node. + Therefore, you should write the Job Preparation task to handle + re-execution. If the compute node is rebooted, the Job Preparation task is + run again on the node before scheduling any other task of the job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did + not previously complete. 
If the compute node is reimaged, the Job + Preparation task is run again before scheduling any task of the job. Batch + will retry tasks when a recovery operation is triggered on a compute node. + Examples of recovery operations include (but are not limited to) when an + unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation task + within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other task in the job can + have the same ID as the Job Preparation task. If you try to submit a task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all task + environment variables are mapped into the container, and the task command + line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation task to complete successfully before scheduling any other + tasks of the job on the compute node. A Job Preparation task has completed + successfully if it exits with exit code 0. 
If true and the Job Preparation + task fails on a compute node, the Batch service retries the Job + Preparation task up to its maximum retry count (as specified in the + constraints element). If the task has still not completed successfully + after all retries, then the Batch service will not schedule tasks of the + job to the compute node. The compute node remains active and eligible to + run tasks of other jobs. If false, the Batch service will not wait for the + Job Preparation task to complete. In this case, other tasks of the job can + start executing on the compute node while the Job Preparation task is + still running; and even if the Job Preparation task fails, new tasks will + continue to be scheduled on the node. The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + task runs. If omitted, the task runs as a non-administrative user unique + to the task on Windows nodes, or a non-administrative user unique to the + pool on Linux nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation task after a compute node reboots. The + Job Preparation task is always rerun if a compute node is reimaged, or if + the Job Preparation task did not complete (e.g. because the reboot + occurred while the task was running). Therefore, you should always write a + Job Preparation task to be idempotent and to behave correctly if run + multiple times. The default value is true. 
+ :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None: + super(JobPreparationTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.constraints = constraints + self.wait_for_success = wait_for_success + self.user_identity = user_identity + self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_release_task.py b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task.py new file mode 100644 index 00000000..cb91dd91 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release task to run on job completion on any compute node where the + job has run. + + The Job Release task runs when the job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the job is still active, the job's maximum wall clock time constraint + is reached, and the job is still active, or the job's Job Manager task + completed, and the job is configured to terminate when the Job Manager + completes. The Job Release task runs on each compute node where tasks of + the job have run and the Job Preparation task ran and completed. If you + reimage a compute node after it has run the Job Preparation task, and the + job ends without any further tasks of the job running on that compute node + (and hence the Job Preparation task does not re-run), then the Job Release + task does not run on that node. If a compute node reboots while the Job + Release task is still running, the Job Release task runs again when the + compute node starts up. The job is not marked as complete until all Job + Release tasks have completed. The Job Release task runs in the background. + It does not occupy a scheduling slot; that is, it does not count towards + the maxTasksPerNode limit specified on the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release task within + the job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. 
If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. No other task in the job can have the + same ID as the Job Release task. If you try to submit a task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all task environment variables + are mapped into the container, and the task command line is executed in + the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. 
This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + task may run on a given compute node, measured from the time the task + starts. If the task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory for + the Job Release task on the compute node. After this time, the Batch + service may delete the task directory and all its contents. The default is + infinite, i.e. the task directory will be retained until the compute node + is removed or reimaged. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.user_identity = kwargs.get('user_identity', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information.py new file mode 100644 index 00000000..0ccb4f64 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release task on a compute + node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release task completed. This + property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release task on the + compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release task on + the compute node. You can use this path to retrieve files created by the + task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. 
+ :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + 
self.failure_info = kwargs.get('failure_info', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information_py3.py new file mode 100644 index 00000000..ed08089b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_execution_information_py3.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release task on a compute + node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release task completed. This + property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release task on the + compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release task on + the compute node. You can use this path to retrieve files created by the + task, such as log files. 
+ :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, result=None, **kwargs) -> None: + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.result = result diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_py3.py new file mode 100644 index 00000000..e8febe4c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_release_task_py3.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release task to run on job completion on any compute node where the + job has run. + + The Job Release task runs when the job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the job is still active, the job's maximum wall clock time constraint + is reached, and the job is still active, or the job's Job Manager task + completed, and the job is configured to terminate when the Job Manager + completes. The Job Release task runs on each compute node where tasks of + the job have run and the Job Preparation task ran and completed. If you + reimage a compute node after it has run the Job Preparation task, and the + job ends without any further tasks of the job running on that compute node + (and hence the Job Preparation task does not re-run), then the Job Release + task does not run on that node. If a compute node reboots while the Job + Release task is still running, the Job Release task runs again when the + compute node starts up. The job is not marked as complete until all Job + Release tasks have completed. The Job Release task runs in the background. + It does not occupy a scheduling slot; that is, it does not count towards + the maxTasksPerNode limit specified on the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release task within + the job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. 
No other task in the job can have the + same ID as the Job Release task. If you try to submit a task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all task environment variables + are mapped into the container, and the task command line is executed in + the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + task may run on a given compute node, measured from the time the task + starts. If the task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory for + the Job Release task on the compute node. After this time, the Batch + service may delete the task directory and all its contents. The default is + infinite, i.e. the task directory will be retained until the compute node + is removed or reimaged. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None: + super(JobReleaseTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.user_identity = user_identity diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options.py new file mode 100644 index 00000000..6c03aaff --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options_py3.py new file mode 100644 index 00000000..fe7b76cc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter.py new file mode 100644 index 00000000..e0d8d724 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the schedule within + the account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter_py3.py new file mode 100644 index 00000000..9281c765 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_add_parameter_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the schedule within + the account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None: + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options.py new file mode 100644 index 00000000..a7e01118 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options_py3.py new file mode 100644 index 00000000..89ae9986 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options.py new file mode 100644 index 00000000..9384c1fb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options_py3.py new file mode 100644 index 00000000..83adbe53 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options.py new file mode 100644 index 00000000..a296d530 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options_py3.py new file mode 100644 index 00000000..daa4d087 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information.py new file mode 100644 index 00000000..b79a4e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about jobs that have been and will be run under a job + schedule. + + :param next_run_time: The next time at which a job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no job will be created at nextRunTime unless the job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent job under the job + schedule. This property is present only if the at least one job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the job schedule is in the completed state. + :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = kwargs.get('next_run_time', None) + self.recent_job = kwargs.get('recent_job', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information_py3.py new file mode 100644 index 00000000..6afcaa38 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_execution_information_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
class JobScheduleExecutionInformation(Model):
    """Describes the jobs that have run, and are due to run, under a job
    schedule.

    :param next_run_time: The next time a job is due to be created under
     this schedule. Only meaningful while the schedule is in the active
     state when that time arrives; a disabled schedule creates no job at
     nextRunTime unless it is enabled again beforehand.
    :type next_run_time: datetime
    :param recent_job: Details of the most recent job created under the
     schedule. Present only once at least one job has run under it.
    :type recent_job: ~azure.batch.models.RecentJob
    :param end_time: The time at which the schedule ended. Set only when
     the job schedule is in the completed state.
    :type end_time: datetime
    """

    _attribute_map = {
        'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'},
        'recent_job': {'key': 'recentJob', 'type': 'RecentJob'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, *, next_run_time=None, recent_job=None, end_time=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Plain keyword-only pass-through; no validation is performed here.
        self.next_run_time = next_run_time
        self.recent_job = recent_job
        self.end_time = end_time
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
class JobScheduleExistsOptions(Model):
    """Extra request options for the exists operation.

    :param timeout: Maximum server-side processing time for the request,
     in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-supplied request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     usually set this to the current system clock time; set it explicitly
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client. The operation runs only if
     the resource's current ETag on the service exactly matches this
     value.
    :type if_match: str
    :param if_none_match: ETag known to the client. The operation runs
     only if the resource's current ETag on the service differs from this
     value.
    :type if_none_match: str
    :param if_modified_since: Last-modified time known to the client. The
     operation runs only if the resource on the service has been modified
     since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified time known to the client.
     The operation runs only if the resource on the service has not been
     modified since this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # General request options.
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Conditional-access (ETag / timestamp) headers.
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
class JobScheduleGetOptions(Model):
    """Extra request options for the get operation.

    :param select: An OData $select clause.
    :type select: str
    :param expand: An OData $expand clause.
    :type expand: str
    :param timeout: Maximum server-side processing time for the request,
     in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-supplied request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     usually set this to the current system clock time; set it explicitly
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client. The operation runs only if
     the resource's current ETag on the service exactly matches this
     value.
    :type if_match: str
    :param if_none_match: ETag known to the client. The operation runs
     only if the resource's current ETag on the service differs from this
     value.
    :type if_none_match: str
    :param if_modified_since: Last-modified time known to the client. The
     operation runs only if the resource on the service has been modified
     since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified time known to the client.
     The operation runs only if the resource on the service has not been
     modified since this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'select': {'key': '', 'type': 'str'},
        'expand': {'key': '', 'type': 'str'},
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleGetOptions, self).__init__(**kwargs)
        # Only timeout and return_client_request_id carry non-None
        # defaults; everything else falls back to None when absent.
        defaults = {
            'select': None,
            'expand': None,
            'timeout': 30,
            'client_request_id': None,
            'return_client_request_id': False,
            'ocp_date': None,
            'if_match': None,
            'if_none_match': None,
            'if_modified_since': None,
            'if_unmodified_since': None,
        }
        for name, fallback in defaults.items():
            setattr(self, name, kwargs.get(name, fallback))
class JobScheduleGetOptions(Model):
    """Extra request options for the get operation.

    :param select: An OData $select clause.
    :type select: str
    :param expand: An OData $expand clause.
    :type expand: str
    :param timeout: Maximum server-side processing time for the request,
     in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-supplied request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     usually set this to the current system clock time; set it explicitly
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client. The operation runs only if
     the resource's current ETag on the service exactly matches this
     value.
    :type if_match: str
    :param if_none_match: ETag known to the client. The operation runs
     only if the resource's current ETag on the service differs from this
     value.
    :type if_none_match: str
    :param if_modified_since: Last-modified time known to the client. The
     operation runs only if the resource on the service has been modified
     since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified time known to the client.
     The operation runs only if the resource on the service has not been
     modified since this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'select': {'key': '', 'type': 'str'},
        'expand': {'key': '', 'type': 'str'},
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # OData projection options.
        self.select = select
        self.expand = expand
        # General request options.
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Conditional-access (ETag / timestamp) headers.
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
class JobScheduleListOptions(Model):
    """Extra request options for the list operation.

    :param filter: An OData $filter clause. For guidance on constructing
     this filter, see
     https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
    :type filter: str
    :param select: An OData $select clause.
    :type select: str
    :param expand: An OData $expand clause.
    :type expand: str
    :param max_results: Maximum number of items to return in the
     response. At most 1000 job schedules can be returned. Default value:
     1000 .
    :type max_results: int
    :param timeout: Maximum server-side processing time for the request,
     in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-supplied request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     usually set this to the current system clock time; set it explicitly
     when calling the REST API directly.
    :type ocp_date: datetime
    """

    _attribute_map = {
        'filter': {'key': '', 'type': 'str'},
        'select': {'key': '', 'type': 'str'},
        'expand': {'key': '', 'type': 'str'},
        'max_results': {'key': '', 'type': 'int'},
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleListOptions, self).__init__(**kwargs)
        # max_results, timeout and return_client_request_id carry
        # non-None defaults; everything else falls back to None.
        defaults = {
            'filter': None,
            'select': None,
            'expand': None,
            'max_results': 1000,
            'timeout': 30,
            'client_request_id': None,
            'return_client_request_id': False,
            'ocp_date': None,
        }
        for name, fallback in defaults.items():
            setattr(self, name, kwargs.get(name, fallback))
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 job schedules can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
class JobSchedulePatchOptions(Model):
    """Extra request options for the patch operation.

    :param timeout: Maximum server-side processing time for the request,
     in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-supplied request identity: a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should echo the
     client-request-id in its response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     usually set this to the current system clock time; set it explicitly
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client. The operation runs only if
     the resource's current ETag on the service exactly matches this
     value.
    :type if_match: str
    :param if_none_match: ETag known to the client. The operation runs
     only if the resource's current ETag on the service differs from this
     value.
    :type if_none_match: str
    :param if_modified_since: Last-modified time known to the client. The
     operation runs only if the resource on the service has been modified
     since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Last-modified time known to the client.
     The operation runs only if the resource on the service has not been
     modified since this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulePatchOptions, self).__init__(**kwargs)
        # Only timeout and return_client_request_id carry non-None
        # defaults; everything else falls back to None when absent.
        defaults = {
            'timeout': 30,
            'client_request_id': None,
            'return_client_request_id': False,
            'ocp_date': None,
            'if_match': None,
            'if_none_match': None,
            'if_modified_since': None,
            'if_unmodified_since': None,
        }
        for name, fallback in defaults.items():
            setattr(self, name, kwargs.get(name, fallback))
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobSchedulePatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_patch_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_patch_parameter.py new file mode 100644 index 00000000..24a074bf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_schedule_patch_parameter.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class JobSchedulePatchParameter(Model):
    """The set of changes to be made to a job schedule.

    Elements that are not specified are left unchanged on the service.

    :param schedule: The schedule according to which jobs will be created.
     If you do not specify this element, the existing schedule is left
     unchanged.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the jobs to be created on this
     schedule. Updates affect only jobs started after the update has taken
     place; any currently active job continues with the older specification.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param metadata: A list of name-value pairs associated with the job
     schedule as metadata. If you do not specify this element, existing
     metadata is left unchanged.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulePatchParameter, self).__init__(**kwargs)
        # Every field is optional; keys absent from kwargs default to None.
        for field in ('schedule', 'job_specification', 'metadata'):
            setattr(self, field, kwargs.get(field))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# NOTE(review): this span of the original patch covers four generated model
# definitions from separate files (job_schedule_patch_parameter_py3.py,
# job_schedule_statistics.py, job_schedule_statistics_py3.py and
# job_schedule_terminate_options.py); they are reproduced here in order.

from msrest.serialization import Model


# From job_schedule_patch_parameter_py3.py (Python 3 keyword-only variant).
class JobSchedulePatchParameter(Model):
    """The set of changes to be made to a job schedule.

    Elements that are not specified are left unchanged on the service.

    :param schedule: The schedule according to which jobs will be created.
     If you do not specify this element, the existing schedule is left
     unchanged.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the jobs to be created on this
     schedule. Updates affect only jobs started after the update has taken
     place; any currently active job continues with the older specification.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param metadata: A list of name-value pairs associated with the job
     schedule as metadata. If you do not specify this element, existing
     metadata is left unchanged.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None:
        super(JobSchedulePatchParameter, self).__init__(**kwargs)
        self.schedule = schedule
        self.job_specification = job_specification
        self.metadata = metadata


# From job_schedule_statistics.py (**kwargs constructor variant).
class JobScheduleStatistics(Model):
    """Resource usage statistics for a job schedule.

    All required parameters must be populated in order to send to Azure.
    Every figure is aggregated across all tasks in all jobs created under
    the schedule, limited to the range startTime..lastUpdateTime.

    :param url: Required. The URL of the statistics.
    :type url: str
    :param start_time: Required. The start of the time range covered by the
     statistics.
    :type start_time: datetime
    :param last_update_time: Required. When the statistics were last
     updated.
    :type last_update_time: datetime
    :param user_cpu_time: Required. Total user mode CPU time (summed across
     all cores and all compute nodes).
    :type user_cpu_time: timedelta
    :param kernel_cpu_time: Required. Total kernel mode CPU time (summed
     across all cores and all compute nodes).
    :type kernel_cpu_time: timedelta
    :param wall_clock_time: Required. Total wall clock time of all tasks,
     from task start on a compute node to finish (or last statistics
     update), including the wall clock time of any retries.
    :type wall_clock_time: timedelta
    :param read_iops: Required. Total number of disk read operations.
    :type read_iops: long
    :param write_iops: Required. Total number of disk write operations.
    :type write_iops: long
    :param read_io_gi_b: Required. Total gibibytes read from disk.
    :type read_io_gi_b: float
    :param write_io_gi_b: Required. Total gibibytes written to disk.
    :type write_io_gi_b: float
    :param num_succeeded_tasks: Required. Number of tasks that completed
     successfully (exit code 0) during the time range.
    :type num_succeeded_tasks: long
    :param num_failed_tasks: Required. Number of tasks that exhausted their
     maximum retry count without returning exit code 0 during the time
     range.
    :type num_failed_tasks: long
    :param num_task_retries: Required. Total number of task retries during
     the time range.
    :type num_task_retries: long
    :param wait_time: Required. Total wait time of all tasks (elapsed time
     between task creation and the start of the most recent task
     execution). Only reported in the account lifetime statistics.
    :type wait_time: timedelta
    """

    _validation = {
        'url': {'required': True},
        'start_time': {'required': True},
        'last_update_time': {'required': True},
        'user_cpu_time': {'required': True},
        'kernel_cpu_time': {'required': True},
        'wall_clock_time': {'required': True},
        'read_iops': {'required': True},
        'write_iops': {'required': True},
        'read_io_gi_b': {'required': True},
        'write_io_gi_b': {'required': True},
        'num_succeeded_tasks': {'required': True},
        'num_failed_tasks': {'required': True},
        'num_task_retries': {'required': True},
        'wait_time': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
        'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
        'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
        'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
        'read_iops': {'key': 'readIOps', 'type': 'long'},
        'write_iops': {'key': 'writeIOps', 'type': 'long'},
        'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
        'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
        'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'},
        'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'},
        'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'},
        'wait_time': {'key': 'waitTime', 'type': 'duration'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleStatistics, self).__init__(**kwargs)
        # All fields are required on the wire, but construction mirrors the
        # generated pattern: keyword arguments absent default to None.
        for field in ('url', 'start_time', 'last_update_time',
                      'user_cpu_time', 'kernel_cpu_time', 'wall_clock_time',
                      'read_iops', 'write_iops', 'read_io_gi_b',
                      'write_io_gi_b', 'num_succeeded_tasks',
                      'num_failed_tasks', 'num_task_retries', 'wait_time'):
            setattr(self, field, kwargs.get(field))


# From job_schedule_statistics_py3.py — same model as above, with a
# keyword-only typed constructor (this definition lived in a separate file;
# in the combined span it shadows the **kwargs variant).
class JobScheduleStatistics(Model):
    """Resource usage statistics for a job schedule.

    All required parameters must be populated in order to send to Azure.
    Every figure is aggregated across all tasks in all jobs created under
    the schedule, limited to the range startTime..lastUpdateTime.

    :param url: Required. The URL of the statistics.
    :type url: str
    :param start_time: Required. The start of the time range covered by the
     statistics.
    :type start_time: datetime
    :param last_update_time: Required. When the statistics were last
     updated.
    :type last_update_time: datetime
    :param user_cpu_time: Required. Total user mode CPU time (summed across
     all cores and all compute nodes).
    :type user_cpu_time: timedelta
    :param kernel_cpu_time: Required. Total kernel mode CPU time (summed
     across all cores and all compute nodes).
    :type kernel_cpu_time: timedelta
    :param wall_clock_time: Required. Total wall clock time of all tasks,
     from task start on a compute node to finish (or last statistics
     update), including the wall clock time of any retries.
    :type wall_clock_time: timedelta
    :param read_iops: Required. Total number of disk read operations.
    :type read_iops: long
    :param write_iops: Required. Total number of disk write operations.
    :type write_iops: long
    :param read_io_gi_b: Required. Total gibibytes read from disk.
    :type read_io_gi_b: float
    :param write_io_gi_b: Required. Total gibibytes written to disk.
    :type write_io_gi_b: float
    :param num_succeeded_tasks: Required. Number of tasks that completed
     successfully (exit code 0) during the time range.
    :type num_succeeded_tasks: long
    :param num_failed_tasks: Required. Number of tasks that exhausted their
     maximum retry count without returning exit code 0 during the time
     range.
    :type num_failed_tasks: long
    :param num_task_retries: Required. Total number of task retries during
     the time range.
    :type num_task_retries: long
    :param wait_time: Required. Total wait time of all tasks (elapsed time
     between task creation and the start of the most recent task
     execution). Only reported in the account lifetime statistics.
    :type wait_time: timedelta
    """

    _validation = {
        'url': {'required': True},
        'start_time': {'required': True},
        'last_update_time': {'required': True},
        'user_cpu_time': {'required': True},
        'kernel_cpu_time': {'required': True},
        'wall_clock_time': {'required': True},
        'read_iops': {'required': True},
        'write_iops': {'required': True},
        'read_io_gi_b': {'required': True},
        'write_io_gi_b': {'required': True},
        'num_succeeded_tasks': {'required': True},
        'num_failed_tasks': {'required': True},
        'num_task_retries': {'required': True},
        'wait_time': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
        'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
        'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
        'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
        'read_iops': {'key': 'readIOps', 'type': 'long'},
        'write_iops': {'key': 'writeIOps', 'type': 'long'},
        'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
        'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
        'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'},
        'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'},
        'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'},
        'wait_time': {'key': 'waitTime', 'type': 'duration'},
    }

    def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None:
        super(JobScheduleStatistics, self).__init__(**kwargs)
        self.url = url
        self.start_time = start_time
        self.last_update_time = last_update_time
        self.user_cpu_time = user_cpu_time
        self.kernel_cpu_time = kernel_cpu_time
        self.wall_clock_time = wall_clock_time
        self.read_iops = read_iops
        self.write_iops = write_iops
        self.read_io_gi_b = read_io_gi_b
        self.write_io_gi_b = write_io_gi_b
        self.num_succeeded_tasks = num_succeeded_tasks
        self.num_failed_tasks = num_failed_tasks
        self.num_task_retries = num_task_retries
        self.wait_time = wait_time


# From job_schedule_terminate_options.py (**kwargs constructor variant).
class JobScheduleTerminateOptions(Model):
    """Additional parameters for terminate operation.

    :param timeout: Maximum time, in seconds, the server may spend
     processing the request. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; client libraries set
     this automatically, so set it explicitly only when calling the REST
     API directly.
    :type ocp_date: datetime
    :param if_match: ETag the resource must currently have on the service
     for the operation to be performed.
    :type if_match: str
    :param if_none_match: ETag the resource must NOT currently have on the
     service for the operation to be performed.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource on
     the service has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     on the service has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleTerminateOptions, self).__init__(**kwargs)
        # Defaults match the service documentation: 30s timeout, no
        # client-request-id echo, everything else unset.
        defaults = {
            'timeout': 30,
            'client_request_id': None,
            'return_client_request_id': False,
            'ocp_date': None,
            'if_match': None,
            'if_none_match': None,
            'if_modified_since': None,
            'if_unmodified_since': None,
        }
        for name, default in defaults.items():
            setattr(self, name, kwargs.get(name, default))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class JobScheduleTerminateOptions(Model):
    """Additional parameters for terminate operation.

    :param timeout: Maximum time, in seconds, the server may spend
     processing the request. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; client libraries set
     this automatically, so set it explicitly only when calling the REST
     API directly.
    :type ocp_date: datetime
    :param if_match: ETag the resource must currently have on the service
     for the operation to be performed.
    :type if_match: str
    :param if_none_match: ETag the resource must NOT currently have on the
     service for the operation to be performed.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource on
     the service has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     on the service has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        super(JobScheduleTerminateOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class JobScheduleUpdateOptions(Model):
    """Additional parameters for update operation.

    :param timeout: Maximum time, in seconds, the server may spend
     processing the request. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; client libraries set
     this automatically, so set it explicitly only when calling the REST
     API directly.
    :type ocp_date: datetime
    :param if_match: ETag the resource must currently have on the service
     for the operation to be performed.
    :type if_match: str
    :param if_none_match: ETag the resource must NOT currently have on the
     service for the operation to be performed.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource on
     the service has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     on the service has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleUpdateOptions, self).__init__(**kwargs)
        # Defaults match the service documentation: 30s timeout, no
        # client-request-id echo, everything else unset.
        defaults = {
            'timeout': 30,
            'client_request_id': None,
            'return_client_request_id': False,
            'ocp_date': None,
            'if_match': None,
            'if_none_match': None,
            'if_modified_since': None,
            'if_unmodified_since': None,
        }
        for name, default in defaults.items():
            setattr(self, name, kwargs.get(name, default))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class JobScheduleUpdateOptions(Model):
    """Additional parameters for update operation.

    :param timeout: Maximum time, in seconds, the server may spend
     processing the request. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; client libraries set
     this automatically, so set it explicitly only when calling the REST
     API directly.
    :type ocp_date: datetime
    :param if_match: ETag the resource must currently have on the service
     for the operation to be performed.
    :type if_match: str
    :param if_none_match: ETag the resource must NOT currently have on the
     service for the operation to be performed.
    :type if_none_match: str
    :param if_modified_since: Perform the operation only if the resource on
     the service has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Perform the operation only if the resource
     on the service has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        super(JobScheduleUpdateOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
class JobScheduleUpdateParameter(Model):
    """The set of changes to be made to a job schedule.

    All required parameters must be populated in order to send to Azure.

    :param schedule: Required. The schedule according to which jobs will be
     created. Omitting this element is equivalent to passing the default
     schedule: a single job scheduled to run immediately.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: Required. Details of the jobs to be created on
     this schedule. Updates affect only jobs started after the update takes
     place; any currently active job continues with the older specification.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param metadata: Name-value pairs associated with the job schedule as
     metadata. Omitting this element sets an empty list, which in effect
     deletes any existing metadata.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'schedule': {'required': True},
        'job_specification': {'required': True},
    }

    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleUpdateParameter, self).__init__(**kwargs)
        # Copy each recognised keyword argument onto the instance, defaulting
        # missing ones to None (same behaviour as the generated assignments).
        for name in ('schedule', 'job_specification', 'metadata'):
            setattr(self, name, kwargs.get(name, None))
Details of the jobs to be created on + this schedule. Updates affect only jobs that are started after the update + has taken place. Any currently active job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the job + schedule as metadata. If you do not specify this element, it takes the + default value of an empty list; in effect, any existing metadata is + deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule, job_specification, metadata=None, **kwargs) -> None: + super(JobScheduleUpdateParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_scheduling_error.py b/azext/generated/sdk/batch/v2018_08_01/models/job_scheduling_error.py new file mode 100644 index 00000000..1869114d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_scheduling_error.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class JobSchedulingError(Model):
    """An error encountered by the Batch service when scheduling a job.

    All required parameters must be populated in order to send to Azure.

    :param category: Required. The category of the job scheduling error.
     Possible values include: 'userError', 'serverError'
    :type category: str or ~azure.batch.models.ErrorCategory
    :param code: Identifier for the error. Codes are invariant and intended
     to be consumed programmatically.
    :type code: str
    :param message: Human-readable description of the error, suitable for
     display in a user interface.
    :type message: str
    :param details: Additional error details related to the scheduling error.
    :type details: list[~azure.batch.models.NameValuePair]
    """

    _validation = {
        'category': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'ErrorCategory'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[NameValuePair]'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulingError, self).__init__(**kwargs)
        # Pull every field off kwargs, defaulting to None when absent.
        for name in ('category', 'code', 'message', 'details'):
            setattr(self, name, kwargs.get(name, None))
class JobSchedulingError(Model):
    """An error encountered by the Batch service when scheduling a job.

    All required parameters must be populated in order to send to Azure.

    :param category: Required. The category of the job scheduling error.
     Possible values include: 'userError', 'serverError'
    :type category: str or ~azure.batch.models.ErrorCategory
    :param code: Identifier for the error. Codes are invariant and intended
     to be consumed programmatically.
    :type code: str
    :param message: Human-readable description of the error, suitable for
     display in a user interface.
    :type message: str
    :param details: Additional error details related to the scheduling error.
    :type details: list[~azure.batch.models.NameValuePair]
    """

    _validation = {
        'category': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'ErrorCategory'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[NameValuePair]'},
    }

    def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Assignments are independent; order differs from the generator's
        # output but the resulting state is identical.
        self.details = details
        self.message = message
        self.code = code
        self.category = category
+ :type priority: int + :param display_name: The display name for jobs created under this + schedule. The name need not be unique and can contain any Unicode + characters up to a maximum length of 1024. + :type display_name: str + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in a job created under this schedule are in the completed + state. Note that if a job contains no tasks, then all tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the job + properties to set onAllTasksComplete to terminatejob once you have + finished adding tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task fails in a job created under this schedule. A task is considered to + have failed if it have failed if has a failureInfo. A failureInfo is set + if the task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param constraints: The execution constraints for jobs created under this + schedule. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager task to be launched + when a job is started under this schedule. 
If the job does not specify a + Job Manager task, the user must explicitly add tasks to the job using the + Task API. If the job does specify a Job Manager task, the Batch service + creates the Job Manager task when the job is created, and will try to + schedule the Job Manager task before scheduling other tasks in the job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task for jobs created + under this schedule. If a job has a Job Preparation task, the Batch + service will run the Job Preparation task on a compute node before + starting any tasks of that job on that compute node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task for jobs created under this + schedule. The primary purpose of the Job Release task is to undo changes + to compute nodes made by the Job Preparation task. Example activities + include deleting local files, or shutting down services that were started + as part of job preparation. A Job Release task cannot be specified without + also specifying a Job Preparation task for the job. The Batch service runs + the Job Release task on the compute nodes that have run the Job + Preparation task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all tasks in jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release tasks). Individual tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The pool on which the Batch service runs the + tasks of jobs created under this schedule. 
+ :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each job + created under this schedule as metadata. The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobSpecification, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = 
class JobSpecification(Model):
    """Specifies details of the jobs to be created on a schedule.

    All required parameters must be populated in order to send to Azure.

    :param priority: The priority of jobs created under this schedule.
     Priority values can range from -1000 to 1000, with -1000 being the lowest
     priority and 1000 being the highest priority. The default value is 0. This
     priority is used as the default for all jobs under the job schedule. You
     can update a job's priority after it has been created by using the update
     job API.
    :type priority: int
    :param display_name: The display name for jobs created under this
     schedule. The name need not be unique and can contain any Unicode
     characters up to a maximum length of 1024.
    :type display_name: str
    :param uses_task_dependencies: Whether tasks in the job can define
     dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    :param on_all_tasks_complete: The action the Batch service should take
     when all tasks in a job created under this schedule are in the completed
     state. Note that if a job contains no tasks, then all tasks are considered
     complete. This option is therefore most commonly used with a Job Manager
     task; if you want to use automatic job termination without a Job Manager,
     you should initially set onAllTasksComplete to noaction and update the job
     properties to set onAllTasksComplete to terminatejob once you have
     finished adding tasks. The default is noaction. Possible values include:
     'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: The action the Batch service should take when any
     task fails in a job created under this schedule. A task is considered to
     have failed if it has a failureInfo. A failureInfo is set if the task
     completes with a non-zero exit code after exhausting its retry count, or
     if there was an error starting the task, for example due to a resource
     file download error. The default is noaction. Possible values include:
     'noAction', 'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param constraints: The execution constraints for jobs created under this
     schedule.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: The details of a Job Manager task to be launched
     when a job is started under this schedule. If the job does not specify a
     Job Manager task, the user must explicitly add tasks to the job using the
     Task API. If the job does specify a Job Manager task, the Batch service
     creates the Job Manager task when the job is created, and will try to
     schedule the Job Manager task before scheduling other tasks in the job.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: The Job Preparation task for jobs created
     under this schedule. If a job has a Job Preparation task, the Batch
     service will run the Job Preparation task on a compute node before
     starting any tasks of that job on that compute node.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: The Job Release task for jobs created under this
     schedule. The primary purpose of the Job Release task is to undo changes
     to compute nodes made by the Job Preparation task. Example activities
     include deleting local files, or shutting down services that were started
     as part of job preparation. A Job Release task cannot be specified without
     also specifying a Job Preparation task for the job. The Batch service runs
     the Job Release task on the compute nodes that have run the Job
     Preparation task.
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: A list of common environment variable
     settings. These environment variables are set for all tasks in jobs
     created under this schedule (including the Job Manager, Job Preparation
     and Job Release tasks). Individual tasks can override an environment
     setting specified here by specifying the same setting name with a
     different value.
    :type common_environment_settings:
     list[~azure.batch.models.EnvironmentSetting]
    :param pool_info: Required. The pool on which the Batch service runs the
     tasks of jobs created under this schedule.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param metadata: A list of name-value pairs associated with each job
     created under this schedule as metadata. The Batch service does not assign
     any meaning to metadata; it is solely for the use of user code.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'pool_info': {'required': True},
    }

    _attribute_map = {
        'priority': {'key': 'priority', 'type': 'int'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None:
        super(JobSpecification, self).__init__(**kwargs)
        self.priority = priority
        self.display_name = display_name
        self.uses_task_dependencies = uses_task_dependencies
        self.on_all_tasks_complete = on_all_tasks_complete
        self.on_task_failure = on_task_failure
        self.constraints = constraints
        self.job_manager_task = job_manager_task
        self.job_preparation_task = job_preparation_task
        self.job_release_task = job_release_task
        self.common_environment_settings = common_environment_settings
        self.pool_info = pool_info
        self.metadata = metadata
class JobStatistics(Model):
    """Resource usage statistics for a job.

    All required parameters must be populated in order to send to Azure.

    :param url: Required. The URL of the statistics.
    :type url: str
    :param start_time: Required. The start of the time range covered by the
     statistics.
    :type start_time: datetime
    :param last_update_time: Required. When the statistics were last updated.
     All statistics are limited to the range between startTime and
     lastUpdateTime.
    :type last_update_time: datetime
    :param user_cpu_time: Required. Total user mode CPU time (summed across
     all cores and all compute nodes) consumed by all tasks in the job.
    :type user_cpu_time: timedelta
    :param kernel_cpu_time: Required. Total kernel mode CPU time (summed
     across all cores and all compute nodes) consumed by all tasks in the job.
    :type kernel_cpu_time: timedelta
    :param wall_clock_time: Required. Total wall clock time of all tasks in
     the job, measured from task start on a compute node to task finish (or to
     the last statistics update if the task had not finished by then); retried
     tasks contribute the wall clock time of every retry.
    :type wall_clock_time: timedelta
    :param read_iops: Required. Total number of disk read operations made by
     all tasks in the job.
    :type read_iops: long
    :param write_iops: Required. Total number of disk write operations made
     by all tasks in the job.
    :type write_iops: long
    :param read_io_gi_b: Required. Total amount of data, in GiB, read from
     disk by all tasks in the job.
    :type read_io_gi_b: float
    :param write_io_gi_b: Required. Total amount of data, in GiB, written to
     disk by all tasks in the job.
    :type write_io_gi_b: float
    :param num_succeeded_tasks: Required. Total number of tasks that completed
     successfully (returned exit code 0) in the job during the given time
     range.
    :type num_succeeded_tasks: long
    :param num_failed_tasks: Required. Total number of tasks in the job that
     failed (exhausted their maximum retry count without returning exit code
     0) during the given time range.
    :type num_failed_tasks: long
    :param num_task_retries: Required. Total number of retries on all the
     tasks in the job during the given time range.
    :type num_task_retries: long
    :param wait_time: Required. Total wait time of all tasks in the job, where
     a task's wait time is the elapsed time from task creation to the start of
     task execution (the most recent execution if retried). Only reported in
     the account lifetime statistics, not in the job statistics.
    :type wait_time: timedelta
    """

    _validation = {
        'url': {'required': True},
        'start_time': {'required': True},
        'last_update_time': {'required': True},
        'user_cpu_time': {'required': True},
        'kernel_cpu_time': {'required': True},
        'wall_clock_time': {'required': True},
        'read_iops': {'required': True},
        'write_iops': {'required': True},
        'read_io_gi_b': {'required': True},
        'write_io_gi_b': {'required': True},
        'num_succeeded_tasks': {'required': True},
        'num_failed_tasks': {'required': True},
        'num_task_retries': {'required': True},
        'wait_time': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
        'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'},
        'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'},
        'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'},
        'read_iops': {'key': 'readIOps', 'type': 'long'},
        'write_iops': {'key': 'writeIOps', 'type': 'long'},
        'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'},
        'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'},
        'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'},
        'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'},
        'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'},
        'wait_time': {'key': 'waitTime', 'type': 'duration'},
    }

    def __init__(self, **kwargs):
        super(JobStatistics, self).__init__(**kwargs)
        # Every statistic is copied straight off kwargs (None when absent),
        # matching the generated one-assignment-per-field behaviour.
        for name in (
                'url', 'start_time', 'last_update_time', 'user_cpu_time',
                'kernel_cpu_time', 'wall_clock_time', 'read_iops',
                'write_iops', 'read_io_gi_b', 'write_io_gi_b',
                'num_succeeded_tasks', 'num_failed_tasks',
                'num_task_retries', 'wait_time'):
            setattr(self, name, kwargs.get(name, None))
The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in the job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all tasks + in the job. The wall clock time is the elapsed time from when the task + started running on a compute node to when it finished (or to the last time + the statistics were updated, if the task had not finished by then). If a + task was retried, this includes the wall clock time of all the task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all tasks in the job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all tasks in the job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all tasks in the job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all tasks in the job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of tasks + successfully completed in the job during the given time range. A task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of tasks in the job + that failed during the given time range. A task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + tasks in the job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all tasks in the job. + The wait time for a task is defined as the elapsed time between the + creation of the task and the start of task execution. 
(If the task is + retried due to failures, the wait time is the time to the most recent task + execution.) This value is only reported in the account lifetime + statistics; it is not included in the job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobStatistics, 
self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options.py new file mode 100644 index 00000000..b858c404 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options_py3.py new file mode 100644 index 00000000..77173bcc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter.py new file mode 100644 index 00000000..4be6eaac --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a job. 
+ + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter_py3.py new file mode 100644 index 00000000..4a496555 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_terminate_parameter_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a job. + + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. 
+ :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, terminate_reason: str=None, **kwargs) -> None: + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_update_options.py b/azext/generated/sdk/batch/v2018_08_01/models/job_update_options.py new file mode 100644 index 00000000..a11f18ab --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_update_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_update_options_py3.py new file mode 100644 index 00000000..61a47c21 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter.py new file mode 100644 index 00000000..35f83063 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a job. 
+ + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. You may change the pool for a job only when the job is + disabled. The Update Job call will fail if you include the poolInfo + element and the job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto pool has a + poolLifetimeOption of job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, **kwargs): + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter_py3.py new file mode 100644 index 00000000..9dce5ea1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/job_update_parameter_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a job. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the job. 
Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. You may change the pool for a job only when the job is + disabled. The Update Job call will fail if you include the poolInfo + element and the job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto pool has a + poolLifetimeOption of job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, *, pool_info, priority: int=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None: + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = priority + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata + self.on_all_tasks_complete = on_all_tasks_complete diff --git a/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration.py new file mode 100644 index 00000000..6ba12182 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user account on a Linux node. + + :param uid: The user ID of the user account. The uid and gid properties + must be specified together or not at all. 
If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between nodes in a Linux pool when the pool's enableInterNodeCommunication + property is true (it is ignored if enableInterNodeCommunication is false). + It does this by placing the key pair into the user's .ssh directory. If + not specified, password-less SSH is not configured between nodes (no + modification of the user's .ssh directory is done). + :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = kwargs.get('uid', None) + self.gid = kwargs.get('gid', None) + self.ssh_private_key = kwargs.get('ssh_private_key', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration_py3.py new file mode 100644 index 00000000..cb35b4c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/linux_user_configuration_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user account on a Linux node. + + :param uid: The user ID of the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between nodes in a Linux pool when the pool's enableInterNodeCommunication + property is true (it is ignored if enableInterNodeCommunication is false). + It does this by placing the key pair into the user's .ssh directory. If + not specified, password-less SSH is not configured between nodes (no + modification of the user's .ssh directory is done). 
+ :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = uid + self.gid = gid + self.ssh_private_key = ssh_private_key diff --git a/azext/generated/sdk/batch/v2018_08_01/models/metadata_item.py b/azext/generated/sdk/batch/v2018_08_01/models/metadata_item.py new file mode 100644 index 00000000..d1d203e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/metadata_item.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(MetadataItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/metadata_item_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/metadata_item_py3.py new file mode 100644 index 00000000..3d127cd1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/metadata_item_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str, **kwargs) -> None: + super(MetadataItem, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings.py b/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings.py new file mode 100644 index 00000000..1fad0897 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance task. + + Multi-instance tasks are commonly used to support MPI tasks. + + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of compute nodes required by the + task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the compute nodes to enable them to coordinate when the primary runs the + main task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. 
+ :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the task working + directory, but instead are downloaded to the task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. + :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, **kwargs): + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = kwargs.get('number_of_instances', None) + self.coordination_command_line = kwargs.get('coordination_command_line', None) + self.common_resource_files = kwargs.get('common_resource_files', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings_py3.py new file mode 100644 index 00000000..0ea3de8e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/multi_instance_settings_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance task. + + Multi-instance tasks are commonly used to support MPI tasks. + + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of compute nodes required by the + task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the compute nodes to enable them to coordinate when the primary runs the + main task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the task working + directory, but instead are downloaded to the task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. 
If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. + :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None: + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = number_of_instances + self.coordination_command_line = coordination_command_line + self.common_resource_files = common_resource_files diff --git a/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair.py b/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair.py new file mode 100644 index 00000000..d2775a33 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NameValuePair, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair_py3.py new file mode 100644 index 00000000..9e508e56 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/name_value_pair_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None: + super(NameValuePair, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2018_08_01/models/network_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/network_configuration.py new file mode 100644 index 00000000..2bb54a8e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/network_configuration.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the compute nodes of the pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes in the pool. If the subnet + doesn't have enough free IP addresses, the pool will partially allocate + compute nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. 
The + specified subnet must allow communication from the Azure Batch service to + be able to schedule tasks on the compute nodes. This can be verified by + checking if the specified VNet has any associated Network Security Groups + (NSG). If communication to the compute nodes in the specified subnet is + denied by an NSG, then the Batch service will set the state of the compute + nodes to unusable. For pools created with virtualMachineConfiguration only + ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, + but for pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param endpoint_configuration: The configuration for endpoints on compute + nodes in the Batch pool. Pool endpoint configuration is only supported on + pools with the virtualMachineConfiguration property. 
+ :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, **kwargs): + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/network_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/network_configuration_py3.py new file mode 100644 index 00000000..475d1adc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/network_configuration_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the compute nodes of the pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes in the pool. 
If the subnet + doesn't have enough free IP addresses, the pool will partially allocate + compute nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. The + specified subnet must allow communication from the Azure Batch service to + be able to schedule tasks on the compute nodes. This can be verified by + checking if the specified VNet has any associated Network Security Groups + (NSG). If communication to the compute nodes in the specified subnet is + denied by an NSG, then the Batch service will set the state of the compute + nodes to unusable. For pools created with virtualMachineConfiguration only + ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, + but for pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param endpoint_configuration: The configuration for endpoints on compute + nodes in the Batch pool. Pool endpoint configuration is only supported on + pools with the virtualMachineConfiguration property. 
+ :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, *, subnet_id: str=None, endpoint_configuration=None, **kwargs) -> None: + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id + self.endpoint_configuration = endpoint_configuration diff --git a/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule.py b/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule.py new file mode 100644 index 00000000..569693bf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. 
If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.access = kwargs.get('access', None) + self.source_address_prefix = kwargs.get('source_address_prefix', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule_py3.py new file mode 100644 index 00000000..9fec92ba --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/network_security_group_rule_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. 
+ :type source_address_prefix: str + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + } + + def __init__(self, *, priority: int, access, source_address_prefix: str, **kwargs) -> None: + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = priority + self.access = access + self.source_address_prefix = source_address_prefix diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information.py b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information.py new file mode 100644 index 00000000..0d61a707 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the node agent. + + The Batch node agent is a program that runs on each node in the pool and + provides Batch capability on the compute node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of the Batch node agent running on + the compute node. 
This version number can be checked against the node + agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the node agent was + updated on the compute node. This is the most recent time that the node + agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.last_update_time = kwargs.get('last_update_time', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information_py3.py new file mode 100644 index 00000000..770e3ca5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the node agent. + + The Batch node agent is a program that runs on each node in the pool and + provides Batch capability on the compute node. + + All required parameters must be populated in order to send to Azure. 
+ + :param version: Required. The version of the Batch node agent running on + the compute node. This version number can be checked against the node + agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the node agent was + updated on the compute node. This is the most recent time that the node + agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, version: str, last_update_time, **kwargs) -> None: + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = version + self.last_update_time = last_update_time diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku.py b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku.py new file mode 100644 index 00000000..dac567dd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentSku(Model): + """A node agent SKU supported by the Batch service. 
+ + The Batch node agent is a program that runs on each node in the pool, and + provides the command-and-control interface between the node and the Batch + service. There are different implementations of the node agent, known as + SKUs, for different operating systems. + + :param id: The ID of the node agent SKU. + :type id: str + :param verified_image_references: The list of Azure Marketplace images + verified to be compatible with this node agent SKU. This collection is not + exhaustive (the node agent may be compatible with other images). + :type verified_image_references: list[~azure.batch.models.ImageReference] + :param os_type: The type of operating system (e.g. Windows or Linux) + compatible with the node agent SKU. Possible values include: 'linux', + 'windows' + :type os_type: str or ~azure.batch.models.OSType + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'verified_image_references': {'key': 'verifiedImageReferences', 'type': '[ImageReference]'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + } + + def __init__(self, **kwargs): + super(NodeAgentSku, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.verified_image_references = kwargs.get('verified_image_references', None) + self.os_type = kwargs.get('os_type', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_paged.py new file mode 100644 index 00000000..020e753c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeAgentSkuPaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeAgentSku <azure.batch.models.NodeAgentSku>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeAgentSku]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeAgentSkuPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_py3.py new file mode 100644 index 00000000..29475f40 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_agent_sku_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentSku(Model): + """A node agent SKU supported by the Batch service. + + The Batch node agent is a program that runs on each node in the pool, and + provides the command-and-control interface between the node and the Batch + service. There are different implementations of the node agent, known as + SKUs, for different operating systems. + + :param id: The ID of the node agent SKU. + :type id: str + :param verified_image_references: The list of Azure Marketplace images + verified to be compatible with this node agent SKU. This collection is not + exhaustive (the node agent may be compatible with other images). 
+ :type verified_image_references: list[~azure.batch.models.ImageReference] + :param os_type: The type of operating system (e.g. Windows or Linux) + compatible with the node agent SKU. Possible values include: 'linux', + 'windows' + :type os_type: str or ~azure.batch.models.OSType + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'verified_image_references': {'key': 'verifiedImageReferences', 'type': '[ImageReference]'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + } + + def __init__(self, *, id: str=None, verified_image_references=None, os_type=None, **kwargs) -> None: + super(NodeAgentSku, self).__init__(**kwargs) + self.id = id + self.verified_image_references = verified_image_references + self.os_type = os_type diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_counts.py b/azext/generated/sdk/batch/v2018_08_01/models/node_counts.py new file mode 100644 index 00000000..de54c0c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_counts.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of nodes in each node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of nodes in the creating state. + :type creating: int + :param idle: Required. The number of nodes in the idle state. + :type idle: int + :param offline: Required. The number of nodes in the offline state. 
+ :type offline: int + :param preempted: Required. The number of nodes in the preempted state. + :type preempted: int + :param rebooting: Required. The count of nodes in the rebooting state. + :type rebooting: int + :param reimaging: Required. The number of nodes in the reimaging state. + :type reimaging: int + :param running: Required. The number of nodes in the running state. + :type running: int + :param starting: Required. The number of nodes in the starting state. + :type starting: int + :param start_task_failed: Required. The number of nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of nodes in the leavingPool + state. + :type leaving_pool: int + :param unknown: Required. The number of nodes in the unknown state. + :type unknown: int + :param unusable: Required. The number of nodes in the unusable state. + :type unusable: int + :param waiting_for_start_task: Required. The number of nodes in the + waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of nodes. 
+ :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(NodeCounts, self).__init__(**kwargs) + self.creating = kwargs.get('creating', None) + self.idle = kwargs.get('idle', None) + self.offline = kwargs.get('offline', None) + self.preempted = kwargs.get('preempted', None) + self.rebooting = kwargs.get('rebooting', None) + self.reimaging = kwargs.get('reimaging', None) + self.running = kwargs.get('running', None) + self.starting = kwargs.get('starting', None) + self.start_task_failed = kwargs.get('start_task_failed', None) + self.leaving_pool = kwargs.get('leaving_pool', None) + self.unknown = kwargs.get('unknown', None) + self.unusable = kwargs.get('unusable', None) + self.waiting_for_start_task = 
kwargs.get('waiting_for_start_task', None) + self.total = kwargs.get('total', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_counts_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_counts_py3.py new file mode 100644 index 00000000..bfeca712 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_counts_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of nodes in each node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of nodes in the creating state. + :type creating: int + :param idle: Required. The number of nodes in the idle state. + :type idle: int + :param offline: Required. The number of nodes in the offline state. + :type offline: int + :param preempted: Required. The number of nodes in the preempted state. + :type preempted: int + :param rebooting: Required. The count of nodes in the rebooting state. + :type rebooting: int + :param reimaging: Required. The number of nodes in the reimaging state. + :type reimaging: int + :param running: Required. The number of nodes in the running state. + :type running: int + :param starting: Required. The number of nodes in the starting state. + :type starting: int + :param start_task_failed: Required. The number of nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of nodes in the leavingPool + state. 
+ :type leaving_pool: int + :param unknown: Required. The number of nodes in the unknown state. + :type unknown: int + :param unusable: Required. The number of nodes in the unusable state. + :type unusable: int + :param waiting_for_start_task: Required. The number of nodes in the + waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of nodes. + :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, *, creating: int, idle: int, offline: int, preempted: int, rebooting: int, reimaging: int, running: int, starting: int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, total: int, **kwargs) -> None: + super(NodeCounts, 
self).__init__(**kwargs) + self.creating = creating + self.idle = idle + self.offline = offline + self.preempted = preempted + self.rebooting = rebooting + self.reimaging = reimaging + self.running = running + self.starting = starting + self.start_task_failed = start_task_failed + self.leaving_pool = leaving_pool + self.unknown = unknown + self.unusable = unusable + self.waiting_for_start_task = waiting_for_start_task + self.total = total diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter.py new file mode 100644 index 00000000..e92b0262 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a compute node. + + :param node_disable_scheduling_option: What to do with currently running + tasks when disabling task scheduling on the compute node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, **kwargs): + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = kwargs.get('node_disable_scheduling_option', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter_py3.py new file mode 100644 index 00000000..d6de68c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_disable_scheduling_parameter_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a compute node. + + :param node_disable_scheduling_option: What to do with currently running + tasks when disabling task scheduling on the compute node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None: + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = node_disable_scheduling_option diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_file.py b/azext/generated/sdk/batch/v2018_08_01/models/node_file.py new file mode 100644 index 00000000..93fa29d6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_file.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a compute node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, **kwargs): + super(NodeFile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.url = kwargs.get('url', None) + self.is_directory = kwargs.get('is_directory', None) + self.properties = kwargs.get('properties', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_file_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/node_file_paged.py new file mode 100644 index 00000000..4463c944 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_file_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeFilePaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeFile <azure.batch.models.NodeFile>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeFile]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeFilePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_file_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_file_py3.py new file mode 100644 index 00000000..410f310d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_file_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a compute node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None: + super(NodeFile, self).__init__(**kwargs) + self.name = name + self.url = url + self.is_directory = is_directory + self.properties = properties diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter.py new file mode 100644 index 00000000..10e13ad7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a compute node. + + :param node_reboot_option: When to reboot the compute node and what to do + with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, **kwargs): + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = kwargs.get('node_reboot_option', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter_py3.py new file mode 100644 index 00000000..0c21c6d1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_reboot_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a compute node. + + :param node_reboot_option: When to reboot the compute node and what to do + with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, *, node_reboot_option=None, **kwargs) -> None: + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = node_reboot_option diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter.py new file mode 100644 index 00000000..aa51f141 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a compute node. + + :param node_reimage_option: When to reimage the compute node and what to + do with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, **kwargs): + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = kwargs.get('node_reimage_option', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter_py3.py new file mode 100644 index 00000000..7af39305 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_reimage_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a compute node. + + :param node_reimage_option: When to reimage the compute node and what to + do with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, *, node_reimage_option=None, **kwargs) -> None: + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = node_reimage_option diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter.py new file mode 100644 index 00000000..f997671b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing compute nodes from a pool. + + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the compute nodes + to be removed from the specified pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of compute nodes to the + pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). 
+ :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) after it has been selected for deallocation. The default + value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = kwargs.get('node_list', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter_py3.py new file mode 100644 index 00000000..b9dbbc4e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_remove_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing compute nodes from a pool. 
+ + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the compute nodes + to be removed from the specified pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of compute nodes to the + pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) after it has been selected for deallocation. The default + value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = node_list + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter.py new file mode 100644 index 00000000..02df471c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user account on a node. + + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. If omitted, any existing password is removed. + :type password: str + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). If omitted, any + existing SSH public key is removed. 
+ :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = kwargs.get('password', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter_py3.py new file mode 100644 index 00000000..3ff93927 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/node_update_user_parameter_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user account on a node. + + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. If omitted, any existing password is removed. + :type password: str + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. 
For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). If omitted, any + existing SSH public key is removed. + :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None: + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = password + self.expiry_time = expiry_time + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2018_08_01/models/os_disk.py b/azext/generated/sdk/batch/v2018_08_01/models/os_disk.py new file mode 100644 index 00000000..08916fa2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/os_disk.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OSDisk(Model): + """Settings for the operating system disk of the virtual machine. 
+ + :param caching: The type of caching to enable for the OS disk. The default + value for caching is readwrite. For information about the caching options + see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + """ + + _attribute_map = { + 'caching': {'key': 'caching', 'type': 'CachingType'}, + } + + def __init__(self, **kwargs): + super(OSDisk, self).__init__(**kwargs) + self.caching = kwargs.get('caching', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/os_disk_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/os_disk_py3.py new file mode 100644 index 00000000..4780c4fa --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/os_disk_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OSDisk(Model): + """Settings for the operating system disk of the virtual machine. + + :param caching: The type of caching to enable for the OS disk. The default + value for caching is readwrite. For information about the caching options + see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
+ Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + """ + + _attribute_map = { + 'caching': {'key': 'caching', 'type': 'CachingType'}, + } + + def __init__(self, *, caching=None, **kwargs) -> None: + super(OSDisk, self).__init__(**kwargs) + self.caching = caching diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file.py new file mode 100644 index 00000000..b16fa592 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch node to another + location after the Batch service has finished executing the task process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). 
If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. + :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, **kwargs): + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = kwargs.get('file_pattern', None) + self.destination = kwargs.get('destination', None) + self.upload_options = kwargs.get('upload_options', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination.py new file mode 100644 index 00000000..ee86a589 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination.py @@ 
-0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. 
+ :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = kwargs.get('path', None) + self.container_url = kwargs.get('container_url', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination_py3.py new file mode 100644 index 00000000..3f0c9ce0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_blob_container_destination_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). 
If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. + :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, *, container_url: str, path: str=None, **kwargs) -> None: + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = path + self.container_url = container_url diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination.py new file mode 100644 index 00000000..1033743c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, **kwargs): + super(OutputFileDestination, self).__init__(**kwargs) + self.container = kwargs.get('container', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination_py3.py new file mode 100644 index 00000000..e7c652b6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_destination_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, *, container=None, **kwargs) -> None: + super(OutputFileDestination, self).__init__(**kwargs) + self.container = container diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_py3.py new file mode 100644 index 00000000..fee0d502 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch node to another + location after the Batch service has finished executing the task process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. 
Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None: + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = file_pattern + self.destination = destination + self.upload_options = upload_options diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options.py new file mode 100644 index 00000000..c626a355 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, **kwargs): + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = kwargs.get('upload_condition', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options_py3.py new file mode 100644 index 00000000..628d8794 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/output_file_upload_options_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, *, upload_condition, **kwargs) -> None: + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = upload_condition diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options.py new file mode 100644 index 00000000..04d968a8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options_py3.py new file mode 100644 index 00000000..62b3e62b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter.py new file mode 100644 index 00000000..7d21c67b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter.py @@ -0,0 +1,197 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the pool within the + account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two pool IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of virtual machines in the pool. All + virtual machines in a pool are the same size. For information about + available sizes of virtual machines for Cloud Services pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. For information about available VM sizes for pools using images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. 
This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale compute nodes in an Azure Batch pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. Enabling inter-node communication limits the + maximum size of the pool due to deployment restrictions on the nodes of + the pool. This may result in the pool not reaching its desired size. The + default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. The task runs when the node is added to the pool or when + the node is restarted. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value of this setting depends on the size of the compute nodes + in the pool (the vmSize setting). + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. 
+ :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, 
+ 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter_py3.py new file mode 100644 index 00000000..170963fa --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_add_parameter_py3.py @@ -0,0 +1,197 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the pool within the + account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two pool IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of virtual machines in the pool. All + virtual machines in a pool are the same size. For information about + available sizes of virtual machines for Cloud Services pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. 
For information about available VM sizes for pools using images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to true. 
If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale compute nodes in an Azure Batch pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. Enabling inter-node communication limits the + maximum size of the pool due to deployment restrictions on the nodes of + the pool. This may result in the pool not reaching its desired size. The + default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. The task runs when the node is added to the pool or when + the node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. 
If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value of this setting depends on the size of the compute nodes + in the pool (the vmSize setting). + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, **kwargs) -> None: + super(PoolAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = 
enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options.py new file mode 100644 index 00000000..622241dc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options_py3.py new file mode 100644 index 00000000..7ca41443 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options.py new file mode 100644 index 00000000..96b0bc7c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options_py3.py new file mode 100644 index 00000000..4a069bd0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_disable_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options.py new file mode 100644 index 00000000..dd77582f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options_py3.py new file mode 100644 index 00000000..507bd702 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter.py new file mode 100644 index 00000000..793c71c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a pool. + + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. The formula is checked for validity before it is + applied to the pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale compute nodes in an + Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. 
+ :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter_py3.py new file mode 100644 index 00000000..1c0019e4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_enable_auto_scale_parameter_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a pool. + + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. The formula is checked for validity before it is + applied to the pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale compute nodes in an + Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
+ :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. + :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None: + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration.py new file mode 100644 index 00000000..97859ff2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, **kwargs): + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration_py3.py new file mode 100644 index 00000000..95788b53 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_endpoint_configuration_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, *, inbound_nat_pools, **kwargs) -> None: + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = inbound_nat_pools diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options.py new file mode 100644 index 00000000..5fbb7ad3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options_py3.py new file mode 100644 index 00000000..a2f09b9d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter.py new file mode 100644 index 00000000..c74cfac2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to the + pool, 'Enable automatic scaling on a pool'. 
For more information about + specifying this formula, see Automatically scale compute nodes in an Azure + Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter_py3.py new file mode 100644 index 00000000..5102b28e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_evaluate_auto_scale_parameter_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to the + pool, 'Enable automatic scaling on a pool'. 
For more information about + specifying this formula, see Automatically scale compute nodes in an Azure + Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, *, auto_scale_formula: str, **kwargs) -> None: + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options.py new file mode 100644 index 00000000..feffd1c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. 
Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options_py3.py new file mode 100644 index 00000000..de152edb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..dbbbcf45 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..0fc18020 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options.py new file mode 100644 index 00000000..a629c21e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options_py3.py new file mode 100644 index 00000000..c0b04bd5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_information.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_information.py new file mode 100644 index 00000000..132e32bb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a job should be assigned to a pool. + + :param pool_id: The ID of an existing pool. All the tasks of the job will + run on the specified pool. You must ensure that the pool referenced by + this property exists. If the pool does not exist at the time the Batch + service tries to schedule a job, no tasks for the job will run until you + create a pool with that id. Note that the Batch service will not reject + the job request; it will simply not run tasks until the pool exists. You + must specify either the pool ID or the auto pool specification, but not + both. + :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto pool when the job is + submitted. If auto pool creation fails, the Batch service moves the job to + a completed state, and the pool creation error is set in the job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto pool. + Any user actions that affect the lifetime of the auto pool while the job + is active will result in unexpected behavior. You must specify either the + pool ID or the auto pool specification, but not both. 
+ :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, **kwargs): + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.auto_pool_specification = kwargs.get('auto_pool_specification', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_information_py3.py new file mode 100644 index 00000000..6fc8d2ce --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a job should be assigned to a pool. + + :param pool_id: The ID of an existing pool. All the tasks of the job will + run on the specified pool. You must ensure that the pool referenced by + this property exists. If the pool does not exist at the time the Batch + service tries to schedule a job, no tasks for the job will run until you + create a pool with that id. Note that the Batch service will not reject + the job request; it will simply not run tasks until the pool exists. You + must specify either the pool ID or the auto pool specification, but not + both. 
+ :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto pool when the job is + submitted. If auto pool creation fails, the Batch service moves the job to + a completed state, and the pool creation error is set in the job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto pool. + Any user actions that affect the lifetime of the auto pool while the job + is active will result in unexpected behavior. You must specify either the + pool ID or the auto pool specification, but not both. + :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, *, pool_id: str=None, auto_pool_specification=None, **kwargs) -> None: + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.auto_pool_specification = auto_pool_specification diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options.py new file mode 100644 index 00000000..1b37afe6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options_py3.py new file mode 100644 index 00000000..5cc33a41 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options.py new file mode 100644 index 00000000..5b52f71a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. + + :param start_time: The earliest time from which to include metrics. 
This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options_py3.py new file mode 100644 index 00000000..2141cfa5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_list_usage_metrics_options_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. 
+ + :param start_time: The earliest time from which to include metrics. This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts.py new file mode 100644 index 00000000..0430b0af --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of nodes in each state for a pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. 
The ID of the pool. + :type pool_id: str + :param dedicated: The number of dedicated nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority nodes in each state. + :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, **kwargs): + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.dedicated = kwargs.get('dedicated', None) + self.low_priority = kwargs.get('low_priority', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_paged.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_paged.py new file mode 100644 index 00000000..67159e5d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolNodeCountsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolNodeCounts <azure.batch.models.PoolNodeCounts>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolNodeCounts]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolNodeCountsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_py3.py new file mode 100644 index 00000000..63ef0824 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_node_counts_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of nodes in each state for a pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool. + :type pool_id: str + :param dedicated: The number of dedicated nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority nodes in each state.
+ :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None: + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = pool_id + self.dedicated = dedicated + self.low_priority = low_priority diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options.py new file mode 100644 index 00000000..82b54aef --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options_py3.py new file mode 100644 index 00000000..ff9f10f0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter.py new file mode 100644 index 00000000..b78ca496 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a pool. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. If this element is present, it replaces any + existing certificate references configured on the pool. If omitted, any + existing certificate references are left unchanged. For Windows compute + nodes, the Batch service installs the certificates to the specified + certificate store and location. For Linux compute nodes, the certificates + are stored in a directory inside the task working directory and an + environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to + query for this location. For certificates with visibility of 'remoteUser', + a 'certs' directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. If this element is present, it replaces any existing + application package references. If you specify an empty collection, then + all application package references are removed from the pool. If omitted, + any existing application package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the pool. If you specify an empty collection, any metadata + is removed from the pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter_py3.py new file mode 100644 index 00000000..6fb389e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_patch_parameter_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a pool. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. If this element is present, it replaces any + existing certificate references configured on the pool. If omitted, any + existing certificate references are left unchanged. For Windows compute + nodes, the Batch service installs the certificates to the specified + certificate store and location. For Linux compute nodes, the certificates + are stored in a directory inside the task working directory and an + environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to + query for this location. For certificates with visibility of 'remoteUser', + a 'certs' directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. If this element is present, it replaces any existing + application package references. If you specify an empty collection, then + all application package references are removed from the pool. If omitted, + any existing application package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the pool. If you specify an empty collection, any metadata + is removed from the pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, **kwargs) -> None: + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options.py new file mode 100644 index 00000000..14be8ddd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options_py3.py new file mode 100644 index 00000000..1fe5eb97 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_remove_nodes_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options.py new file mode 100644 index 00000000..e83a7ccc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options_py3.py new file mode 100644 index 00000000..ef457e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
class PoolResizeParameter(Model):
    """Options for changing the size of a pool.

    :param target_dedicated_nodes: The desired number of dedicated compute
     nodes in the pool.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: The desired number of low-priority
     compute nodes in the pool.
    :type target_low_priority_nodes: int
    :param resize_timeout: The timeout for allocation of compute nodes to
     the pool or removal of compute nodes from the pool. Defaults to 15
     minutes; the minimum is 5 minutes. A smaller value is rejected by the
     Batch service with HTTP status 400 (Bad Request).
    :type resize_timeout: timedelta
    :param node_deallocation_option: Determines what to do with a node and
     its running task(s) if the pool size is decreasing. Defaults to
     requeue. Possible values include: 'requeue', 'terminate',
     'taskCompletion', 'retainedData'
    :type node_deallocation_option: str or
     ~azure.batch.models.ComputeNodeDeallocationOption
    """

    _attribute_map = {
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'},
    }

    def __init__(self, **kwargs):
        super(PoolResizeParameter, self).__init__(**kwargs)
        # Pull every serialized attribute from kwargs, defaulting to None,
        # in the declaration order of _attribute_map.
        for attr in self._attribute_map:
            setattr(self, attr, kwargs.get(attr, None))
class PoolResizeParameter(Model):
    """Options for changing the size of a pool.

    :param target_dedicated_nodes: The desired number of dedicated compute
     nodes in the pool.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: The desired number of low-priority
     compute nodes in the pool.
    :type target_low_priority_nodes: int
    :param resize_timeout: The timeout for allocation of compute nodes to
     the pool or removal of compute nodes from the pool. Defaults to 15
     minutes; the minimum is 5 minutes. A smaller value is rejected by the
     Batch service with HTTP status 400 (Bad Request).
    :type resize_timeout: timedelta
    :param node_deallocation_option: Determines what to do with a node and
     its running task(s) if the pool size is decreasing. Defaults to
     requeue. Possible values include: 'requeue', 'terminate',
     'taskCompletion', 'retainedData'
    :type node_deallocation_option: str or
     ~azure.batch.models.ComputeNodeDeallocationOption
    """

    _attribute_map = {
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'},
    }

    def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None:
        # Keyword-only constructor (Python 3 variant of the generated model).
        super(PoolResizeParameter, self).__init__(**kwargs)
        self.target_dedicated_nodes = target_dedicated_nodes
        self.target_low_priority_nodes = target_low_priority_nodes
        self.resize_timeout = resize_timeout
        self.node_deallocation_option = node_deallocation_option
class PoolSpecification(Model):
    """Specification for creating a new pool.

    All required parameters must be populated in order to send to Azure.

    :param display_name: The display name for the pool. Need not be unique;
     may contain any Unicode characters up to a maximum length of 1024.
    :type display_name: str
    :param vm_size: Required. The size of the virtual machines in the pool.
     All virtual machines in a pool are the same size.
    :type vm_size: str
    :param cloud_service_configuration: The cloud service configuration for
     the pool (Azure PaaS VMs). Mutually exclusive with
     virtualMachineConfiguration; exactly one must be specified. Cannot be
     used when the account's poolAllocationMode is 'UserSubscription'.
    :type cloud_service_configuration:
     ~azure.batch.models.CloudServiceConfiguration
    :param virtual_machine_configuration: The virtual machine configuration
     for the pool (Azure IaaS VMs). Mutually exclusive with
     cloudServiceConfiguration; exactly one must be specified.
    :type virtual_machine_configuration:
     ~azure.batch.models.VirtualMachineConfiguration
    :param max_tasks_per_node: The maximum number of tasks that can run
     concurrently on a single compute node. Defaults to 1; the maximum
     depends on the node size (vmSize).
    :type max_tasks_per_node: int
    :param task_scheduling_policy: How tasks are distributed across compute
     nodes in a pool.
    :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
    :param resize_timeout: The timeout for allocation of compute nodes to
     the pool; applies only to manual scaling. Defaults to 15 minutes;
     values under 5 minutes are rejected with HTTP 400 (Bad Request).
    :type resize_timeout: timedelta
    :param target_dedicated_nodes: The desired number of dedicated compute
     nodes. Must not be set when enableAutoScale is true; when false, at
     least one of targetDedicatedNodes / targetLowPriorityNodes is required.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: The desired number of low-priority
     compute nodes. Must not be set when enableAutoScale is true; when
     false, at least one of targetDedicatedNodes / targetLowPriorityNodes
     is required.
    :type target_low_priority_nodes: int
    :param enable_auto_scale: Whether the pool size should automatically
     adjust over time. When true, autoScaleFormula is required and the pool
     resizes according to the formula. Defaults to false.
    :type enable_auto_scale: bool
    :param auto_scale_formula: The formula for the desired number of
     compute nodes. Required when enableAutoScale is true, forbidden when
     false. The formula is validated before the pool is created.
    :type auto_scale_formula: str
    :param auto_scale_evaluation_interval: The interval at which the pool
     size is adjusted per the autoscale formula. Defaults to 15 minutes;
     must be between 5 minutes and 168 hours, otherwise the Batch service
     rejects the request with HTTP 400 (Bad Request).
    :type auto_scale_evaluation_interval: timedelta
    :param enable_inter_node_communication: Whether the pool permits direct
     communication between nodes. Enabling this limits the maximum pool
     size and may prevent the pool reaching its desired size. Defaults to
     false.
    :type enable_inter_node_communication: bool
    :param network_configuration: The network configuration for the pool.
    :type network_configuration: ~azure.batch.models.NetworkConfiguration
    :param start_task: A task to run on each compute node as it joins the
     pool (on addition to the pool or on restart).
    :type start_task: ~azure.batch.models.StartTask
    :param certificate_references: Certificates to install on each node.
     On Windows they go to the specified store/location; on Linux they are
     stored under the task working directory (AZ_BATCH_CERTIFICATES_DIR),
     with 'remoteUser'-visible certificates placed in the user's
     ~/certs directory.
    :type certificate_references:
     list[~azure.batch.models.CertificateReference]
    :param application_package_references: Application packages to install
     on each compute node in the pool.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param application_licenses: Application licenses the Batch service
     makes available on each node. Must be a subset of available Batch
     service licenses ('maya', 'vray', '3dsmax', 'arnold'); unsupported
     requests fail pool creation. Additional charges apply per license.
    :type application_licenses: list[str]
    :param user_accounts: User accounts to create on each node in the pool.
    :type user_accounts: list[~azure.batch.models.UserAccount]
    :param metadata: Name-value pairs associated with the pool as metadata;
     not interpreted by the Batch service.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'vm_size': {'required': True},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(PoolSpecification, self).__init__(**kwargs)
        # Populate every mapped attribute from kwargs (None when absent),
        # in the declaration order of _attribute_map.
        for attr in self._attribute_map:
            setattr(self, attr, kwargs.get(attr, None))
class PoolSpecification(Model):
    """Specification for creating a new pool.

    All required parameters must be populated in order to send to Azure.

    :param display_name: The display name for the pool. Need not be unique;
     may contain any Unicode characters up to a maximum length of 1024.
    :type display_name: str
    :param vm_size: Required. The size of the virtual machines in the pool.
     All virtual machines in a pool are the same size.
    :type vm_size: str
    :param cloud_service_configuration: The cloud service configuration for
     the pool (Azure PaaS VMs). Mutually exclusive with
     virtualMachineConfiguration; exactly one must be specified. Cannot be
     used when the account's poolAllocationMode is 'UserSubscription'.
    :type cloud_service_configuration:
     ~azure.batch.models.CloudServiceConfiguration
    :param virtual_machine_configuration: The virtual machine configuration
     for the pool (Azure IaaS VMs). Mutually exclusive with
     cloudServiceConfiguration; exactly one must be specified.
    :type virtual_machine_configuration:
     ~azure.batch.models.VirtualMachineConfiguration
    :param max_tasks_per_node: The maximum number of tasks that can run
     concurrently on a single compute node. Defaults to 1; the maximum
     depends on the node size (vmSize).
    :type max_tasks_per_node: int
    :param task_scheduling_policy: How tasks are distributed across compute
     nodes in a pool.
    :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
    :param resize_timeout: The timeout for allocation of compute nodes to
     the pool; applies only to manual scaling. Defaults to 15 minutes;
     values under 5 minutes are rejected with HTTP 400 (Bad Request).
    :type resize_timeout: timedelta
    :param target_dedicated_nodes: The desired number of dedicated compute
     nodes. Must not be set when enableAutoScale is true; when false, at
     least one of targetDedicatedNodes / targetLowPriorityNodes is required.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: The desired number of low-priority
     compute nodes. Must not be set when enableAutoScale is true; when
     false, at least one of targetDedicatedNodes / targetLowPriorityNodes
     is required.
    :type target_low_priority_nodes: int
    :param enable_auto_scale: Whether the pool size should automatically
     adjust over time. When true, autoScaleFormula is required and the pool
     resizes according to the formula. Defaults to false.
    :type enable_auto_scale: bool
    :param auto_scale_formula: The formula for the desired number of
     compute nodes. Required when enableAutoScale is true, forbidden when
     false. The formula is validated before the pool is created.
    :type auto_scale_formula: str
    :param auto_scale_evaluation_interval: The interval at which the pool
     size is adjusted per the autoscale formula. Defaults to 15 minutes;
     must be between 5 minutes and 168 hours, otherwise the Batch service
     rejects the request with HTTP 400 (Bad Request).
    :type auto_scale_evaluation_interval: timedelta
    :param enable_inter_node_communication: Whether the pool permits direct
     communication between nodes. Enabling this limits the maximum pool
     size and may prevent the pool reaching its desired size. Defaults to
     false.
    :type enable_inter_node_communication: bool
    :param network_configuration: The network configuration for the pool.
    :type network_configuration: ~azure.batch.models.NetworkConfiguration
    :param start_task: A task to run on each compute node as it joins the
     pool (on addition to the pool or on restart).
    :type start_task: ~azure.batch.models.StartTask
    :param certificate_references: Certificates to install on each node.
     On Windows they go to the specified store/location; on Linux they are
     stored under the task working directory (AZ_BATCH_CERTIFICATES_DIR),
     with 'remoteUser'-visible certificates placed in the user's
     ~/certs directory.
    :type certificate_references:
     list[~azure.batch.models.CertificateReference]
    :param application_package_references: Application packages to install
     on each compute node in the pool.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param application_licenses: Application licenses the Batch service
     makes available on each node. Must be a subset of available Batch
     service licenses ('maya', 'vray', '3dsmax', 'arnold'); unsupported
     requests fail pool creation. Additional charges apply per license.
    :type application_licenses: list[str]
    :param user_accounts: User accounts to create on each node in the pool.
    :type user_accounts: list[~azure.batch.models.UserAccount]
    :param metadata: Name-value pairs associated with the pool as metadata;
     not interpreted by the Batch service.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'vm_size': {'required': True},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, **kwargs) -> None:
        # Keyword-only constructor (Python 3 variant of the generated model);
        # vm_size is the only required field (see _validation).
        super(PoolSpecification, self).__init__(**kwargs)
        self.display_name = display_name
        self.vm_size = vm_size
        self.cloud_service_configuration = cloud_service_configuration
        self.virtual_machine_configuration = virtual_machine_configuration
        self.max_tasks_per_node = max_tasks_per_node
        self.task_scheduling_policy = task_scheduling_policy
        self.resize_timeout = resize_timeout
        self.target_dedicated_nodes = target_dedicated_nodes
        self.target_low_priority_nodes = target_low_priority_nodes
        self.enable_auto_scale = enable_auto_scale
        self.auto_scale_formula = auto_scale_formula
        self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
        self.enable_inter_node_communication = enable_inter_node_communication
        self.network_configuration = network_configuration
        self.start_task = start_task
        self.certificate_references = certificate_references
        self.application_package_references = application_package_references
        self.application_licenses = application_licenses
        self.user_accounts = user_accounts
        self.metadata = metadata
class PoolStatistics(Model):
    """Contains utilization and resource usage statistics for the lifetime
    of a pool.

    All required parameters must be populated in order to send to Azure.

    :param url: Required. The URL for the statistics.
    :type url: str
    :param start_time: Required. The start time of the time range covered
     by the statistics.
    :type start_time: datetime
    :param last_update_time: Required. The time at which the statistics
     were last updated. All statistics are limited to the range between
     startTime and lastUpdateTime.
    :type last_update_time: datetime
    :param usage_stats: Statistics related to pool usage, such as the
     amount of core-time used.
    :type usage_stats: ~azure.batch.models.UsageStatistics
    :param resource_stats: Statistics related to resource consumption by
     compute nodes in the pool.
    :type resource_stats: ~azure.batch.models.ResourceStatistics
    """

    _validation = {
        'url': {'required': True},
        'start_time': {'required': True},
        'last_update_time': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
        'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'},
        'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'},
    }

    def __init__(self, **kwargs):
        super(PoolStatistics, self).__init__(**kwargs)
        # Populate every mapped attribute from kwargs (None when absent),
        # in the declaration order of _attribute_map.
        for attr in self._attribute_map:
            setattr(self, attr, kwargs.get(attr, None))
class PoolStatistics(Model):
    """Contains utilization and resource usage statistics for the lifetime
    of a pool.

    All required parameters must be populated in order to send to Azure.

    :param url: Required. The URL for the statistics.
    :type url: str
    :param start_time: Required. The start time of the time range covered
     by the statistics.
    :type start_time: datetime
    :param last_update_time: Required. The time at which the statistics
     were last updated. All statistics are limited to the range between
     startTime and lastUpdateTime.
    :type last_update_time: datetime
    :param usage_stats: Statistics related to pool usage, such as the
     amount of core-time used.
    :type usage_stats: ~azure.batch.models.UsageStatistics
    :param resource_stats: Statistics related to resource consumption by
     compute nodes in the pool.
    :type resource_stats: ~azure.batch.models.ResourceStatistics
    """

    _validation = {
        'url': {'required': True},
        'start_time': {'required': True},
        'last_update_time': {'required': True},
    }

    _attribute_map = {
        'url': {'key': 'url', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
        'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'},
        'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'},
    }

    def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None:
        # Keyword-only constructor (Python 3 variant of the generated model);
        # url, start_time and last_update_time are required (see _validation).
        super(PoolStatistics, self).__init__(**kwargs)
        self.url = url
        self.start_time = start_time
        self.last_update_time = last_update_time
        self.usage_stats = usage_stats
        self.resource_stats = resource_stats
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_stop_resize_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_stop_resize_options_py3.py new file mode 100644 index 00000000..d5cc404e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_stop_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options.py new file mode 100644 index 00000000..ca7f97cb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options_py3.py new file mode 100644 index 00000000..edf5065c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter.py new file mode 100644 index 00000000..bbc35acc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is removed from the pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of certificates to be + installed on each compute node in the pool. This list replaces any + existing certificate references configured on the pool. If you specify an + empty collection, any existing certificate references are removed from the + pool. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. A list of application + packages to be installed on each compute node in the pool. The list + replaces any existing application package references on the pool. Changes + to application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. If omitted, or if you specify an empty + collection, any existing application packages references are removed from + the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + pool as metadata. This list replaces any existing metadata configured on + the pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the pool. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter_py3.py new file mode 100644 index 00000000..20457439 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_update_properties_parameter_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is removed from the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of certificates to be + installed on each compute node in the pool. This list replaces any + existing certificate references configured on the pool. If you specify an + empty collection, any existing certificate references are removed from the + pool. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. A list of application + packages to be installed on each compute node in the pool. The list + replaces any existing application package references on the pool. Changes + to application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. If omitted, or if you specify an empty + collection, any existing application packages references are removed from + the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + pool as metadata. 
This list replaces any existing metadata configured on + the pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the pool. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, **kwargs) -> None: + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options.py new file mode 100644 index 00000000..fbdfdf91 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpgradeOsOptions(Model): + """Additional parameters for upgrade_os operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolUpgradeOsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options_py3.py new file mode 100644 index 00000000..67884745 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpgradeOsOptions(Model): + """Additional parameters for upgrade_os operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolUpgradeOsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter.py new file mode 100644 index 00000000..17141ac0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpgradeOSParameter(Model): + """Options for upgrading the operating system of compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param target_os_version: Required. The Azure Guest OS version to be + installed on the virtual machines in the pool. + :type target_os_version: str + """ + + _validation = { + 'target_os_version': {'required': True}, + } + + _attribute_map = { + 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PoolUpgradeOSParameter, self).__init__(**kwargs) + self.target_os_version = kwargs.get('target_os_version', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter_py3.py new file mode 100644 index 00000000..de2169d0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_upgrade_os_parameter_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpgradeOSParameter(Model): + """Options for upgrading the operating system of compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param target_os_version: Required. The Azure Guest OS version to be + installed on the virtual machines in the pool. 
+ :type target_os_version: str + """ + + _validation = { + 'target_os_version': {'required': True}, + } + + _attribute_map = { + 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, + } + + def __init__(self, *, target_os_version: str, **kwargs) -> None: + super(PoolUpgradeOSParameter, self).__init__(**kwargs) + self.target_os_version = target_os_version diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics.py new file mode 100644 index 00000000..93cfa03f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the pool. All + VMs in a pool are the same size. 
For information about available sizes of + virtual machines in pools, see Choose a VM size for compute nodes in an + Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the pool + during this aggregation interval. + :type total_core_hours: float + :param data_ingress_gi_b: Required. The cross data center network ingress + to the pool during this interval, in GiB. + :type data_ingress_gi_b: float + :param data_egress_gi_b: Required. The cross data center network egress + from the pool during this interval, in GiB. + :type data_egress_gi_b: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + 'data_ingress_gi_b': {'required': True}, + 'data_egress_gi_b': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + 'data_ingress_gi_b': {'key': 'dataIngressGiB', 'type': 'float'}, + 'data_egress_gi_b': {'key': 'dataEgressGiB', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_core_hours = kwargs.get('total_core_hours', None) + self.data_ingress_gi_b = kwargs.get('data_ingress_gi_b', None) + self.data_egress_gi_b = kwargs.get('data_egress_gi_b', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_paged.py 
b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_paged.py new file mode 100644 index 00000000..891554f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolUsageMetricsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolUsageMetrics ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolUsageMetrics]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolUsageMetricsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_py3.py new file mode 100644 index 00000000..5c7ea9eb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/pool_usage_metrics_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the pool. All + VMs in a pool are the same size. For information about available sizes of + virtual machines in pools, see Choose a VM size for compute nodes in an + Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the pool + during this aggregation interval. + :type total_core_hours: float + :param data_ingress_gi_b: Required. The cross data center network ingress + to the pool during this interval, in GiB. + :type data_ingress_gi_b: float + :param data_egress_gi_b: Required. The cross data center network egress + from the pool during this interval, in GiB. 
+ :type data_egress_gi_b: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + 'data_ingress_gi_b': {'required': True}, + 'data_egress_gi_b': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + 'data_ingress_gi_b': {'key': 'dataIngressGiB', 'type': 'float'}, + 'data_egress_gi_b': {'key': 'dataEgressGiB', 'type': 'float'}, + } + + def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, data_ingress_gi_b: float, data_egress_gi_b: float, **kwargs) -> None: + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = pool_id + self.start_time = start_time + self.end_time = end_time + self.vm_size = vm_size + self.total_core_hours = total_core_hours + self.data_ingress_gi_b = data_ingress_gi_b + self.data_egress_gi_b = data_egress_gi_b diff --git a/azext/generated/sdk/batch/v2018_08_01/models/recent_job.py b/azext/generated/sdk/batch/v2018_08_01/models/recent_job.py new file mode 100644 index 00000000..11d430a5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/recent_job.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent job to run under the job schedule. + + :param id: The ID of the job. + :type id: str + :param url: The URL of the job. + :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(RecentJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/recent_job_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/recent_job_py3.py new file mode 100644 index 00000000..94b133ae --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/recent_job_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent job to run under the job schedule. + + :param id: The ID of the job. + :type id: str + :param url: The URL of the job. 
+ :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None: + super(RecentJob, self).__init__(**kwargs) + self.id = id + self.url = url diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resize_error.py b/azext/generated/sdk/batch/v2018_08_01/models/resize_error.py new file mode 100644 index 00000000..8d166d81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resize_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a pool. + + :param code: An identifier for the pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ResizeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resize_error_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/resize_error_py3.py new file mode 100644 index 00000000..9e400e60 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resize_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a pool. + + :param code: An identifier for the pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(ResizeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resource_file.py b/azext/generated/sdk/batch/v2018_08_01/models/resource_file.py new file mode 100644 index 00000000..f7a04764 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resource_file.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A file to be downloaded from Azure blob storage to a compute node. + + All required parameters must be populated in order to send to Azure. + + :param blob_source: Required. The URL of the file within Azure Blob + Storage. This URL must be readable using anonymous access; that is, the + Batch service does not present any credentials when downloading the blob. + There are two ways to get such a URL for a blob in Azure storage: include + a Shared Access Signature (SAS) granting read permissions on the blob, or + set the ACL for the blob or its container to allow public access. + :type blob_source: str + :param file_path: Required. 
The location on the compute node to which to + download the file, relative to the task's working directory. + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux compute nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows node. If this property is not specified for a + Linux node, then a default value of 0770 is applied to the file. + :type file_mode: str + """ + + _validation = { + 'blob_source': {'required': True}, + 'file_path': {'required': True}, + } + + _attribute_map = { + 'blob_source': {'key': 'blobSource', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ResourceFile, self).__init__(**kwargs) + self.blob_source = kwargs.get('blob_source', None) + self.file_path = kwargs.get('file_path', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resource_file_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/resource_file_py3.py new file mode 100644 index 00000000..b7c792e7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resource_file_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A file to be downloaded from Azure blob storage to a compute node. 
+ + All required parameters must be populated in order to send to Azure. + + :param blob_source: Required. The URL of the file within Azure Blob + Storage. This URL must be readable using anonymous access; that is, the + Batch service does not present any credentials when downloading the blob. + There are two ways to get such a URL for a blob in Azure storage: include + a Shared Access Signature (SAS) granting read permissions on the blob, or + set the ACL for the blob or its container to allow public access. + :type blob_source: str + :param file_path: Required. The location on the compute node to which to + download the file, relative to the task's working directory. + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux compute nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows node. If this property is not specified for a + Linux node, then a default value of 0770 is applied to the file. 
+ :type file_mode: str + """ + + _validation = { + 'blob_source': {'required': True}, + 'file_path': {'required': True}, + } + + _attribute_map = { + 'blob_source': {'key': 'blobSource', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, blob_source: str, file_path: str, file_mode: str=None, **kwargs) -> None: + super(ResourceFile, self).__init__(**kwargs) + self.blob_source = blob_source + self.file_path = file_path + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics.py b/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics.py new file mode 100644 index 00000000..5e861d9e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. 
The average CPU usage across all + nodes in the pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all nodes in the pool. + :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + nodes in the pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all nodes in the pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all nodes in the pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all nodes in the pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all nodes in the pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all nodes in the pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all nodes in the pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all nodes in the pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all nodes in the pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.avg_cpu_percentage = kwargs.get('avg_cpu_percentage', None) + self.avg_memory_gi_b = kwargs.get('avg_memory_gi_b', None) + self.peak_memory_gi_b = kwargs.get('peak_memory_gi_b', None) + self.avg_disk_gi_b = kwargs.get('avg_disk_gi_b', None) + self.peak_disk_gi_b = kwargs.get('peak_disk_gi_b', None) + self.disk_read_iops = 
kwargs.get('disk_read_iops', None) + self.disk_write_iops = kwargs.get('disk_write_iops', None) + self.disk_read_gi_b = kwargs.get('disk_read_gi_b', None) + self.disk_write_gi_b = kwargs.get('disk_write_gi_b', None) + self.network_read_gi_b = kwargs.get('network_read_gi_b', None) + self.network_write_gi_b = kwargs.get('network_write_gi_b', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics_py3.py new file mode 100644 index 00000000..bcf0830f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/resource_statistics_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + nodes in the pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all nodes in the pool. 
+ :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + nodes in the pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all nodes in the pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all nodes in the pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all nodes in the pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all nodes in the pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all nodes in the pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all nodes in the pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all nodes in the pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all nodes in the pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None: + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.avg_cpu_percentage = avg_cpu_percentage + self.avg_memory_gi_b = 
avg_memory_gi_b + self.peak_memory_gi_b = peak_memory_gi_b + self.avg_disk_gi_b = avg_disk_gi_b + self.peak_disk_gi_b = peak_disk_gi_b + self.disk_read_iops = disk_read_iops + self.disk_write_iops = disk_write_iops + self.disk_read_gi_b = disk_read_gi_b + self.disk_write_gi_b = disk_write_gi_b + self.network_read_gi_b = network_read_gi_b + self.network_write_gi_b = network_write_gi_b diff --git a/azext/generated/sdk/batch/v2018_08_01/models/schedule.py b/azext/generated/sdk/batch/v2018_08_01/models/schedule.py new file mode 100644 index 00000000..e6339eb9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/schedule.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which jobs will be created. + + :param do_not_run_until: The earliest time at which any job may be created + under this job schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create jobs immediately. + :type do_not_run_until: datetime + :param do_not_run_after: A time after which no job will be created under + this job schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active job under this job + schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring job schedule, the job schedule will remain active until you + explicitly terminate it. 
+ :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a job should be created, within which a job must be + created. If a job is not created within the startWindow interval, then the + 'opportunity' is lost; no job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive jobs under the job schedule. A job schedule can have at + most one active job under it at any given time. Because a job schedule can + have at most one active job under it at any given time, if it is time to + create a new job under a job schedule, but the previous job is still + running, the Batch service will not create the new job until the previous + job finishes. If the previous job does not finish within the startWindow + period of the new recurrenceInterval, then no new job will be scheduled + for that interval. For recurring jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when jobs are created, add + tasks to the jobs and terminate the jobs ready for the next recurrence. + The default is that the schedule does not recur: one job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that job finishes. The minimum value is 1 minute. 
If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = kwargs.get('do_not_run_until', None) + self.do_not_run_after = kwargs.get('do_not_run_after', None) + self.start_window = kwargs.get('start_window', None) + self.recurrence_interval = kwargs.get('recurrence_interval', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/schedule_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/schedule_py3.py new file mode 100644 index 00000000..66ab18a4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/schedule_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which jobs will be created. + + :param do_not_run_until: The earliest time at which any job may be created + under this job schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create jobs immediately. 
+ :type do_not_run_until: datetime + :param do_not_run_after: A time after which no job will be created under + this job schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active job under this job + schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring job schedule, the job schedule will remain active until you + explicitly terminate it. + :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a job should be created, within which a job must be + created. If a job is not created within the startWindow interval, then the + 'opportunity' is lost; no job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive jobs under the job schedule. A job schedule can have at + most one active job under it at any given time. Because a job schedule can + have at most one active job under it at any given time, if it is time to + create a new job under a job schedule, but the previous job is still + running, the Batch service will not create the new job until the previous + job finishes. If the previous job does not finish within the startWindow + period of the new recurrenceInterval, then no new job will be scheduled + for that interval. 
For recurring jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when jobs are created, add + tasks to the jobs and terminate the jobs ready for the next recurrence. + The default is that the schedule does not recur: one job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that job finishes. The minimum value is 1 minute. If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None: + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = do_not_run_until + self.do_not_run_after = do_not_run_after + self.start_window = start_window + self.recurrence_interval = recurrence_interval diff --git a/azext/generated/sdk/batch/v2018_08_01/models/start_task.py b/azext/generated/sdk/batch/v2018_08_01/models/start_task.py new file mode 100644 index 00000000..9f0ad707 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/start_task.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A task which is run when a compute node joins a pool in the Azure Batch + service, or when the compute node is rebooted or reimaged. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start task runs. 
When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all task environment variables are + mapped into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start task runs. + If omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the task. If the maximum retry count is + -1, the Batch service retries the task without limit. 
+ :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start task to complete successfully (that is, to exit with exit code 0) + before scheduling any tasks on the compute node. If true and the start + task fails on a compute node, the Batch service retries the start task up + to its maximum retry count (maxTaskRetryCount). If the task has still not + completed successfully after all retries, then the Batch service marks the + compute node unusable, and will not schedule tasks to it. This condition + can be detected via the node state and failure info details. If false, the + Batch service will not wait for the start task to complete. In this case, + other tasks can start executing on the compute node while the start task + is still running; and even if the start task fails, new tasks will + continue to be scheduled on the node. The default is false. + :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(StartTask, self).__init__(**kwargs) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.user_identity = kwargs.get('user_identity', None) + self.max_task_retry_count = 
kwargs.get('max_task_retry_count', None) + self.wait_for_success = kwargs.get('wait_for_success', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/start_task_information.py b/azext/generated/sdk/batch/v2018_08_01/models/start_task_information.py new file mode 100644 index 00000000..e8b68b08 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/start_task_information.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start task on the compute node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start task started + running. This value is reset every time the task is restarted or retried + (that is, this is the most recent time at which the start task started + running). + :type start_time: datetime + :param end_time: The time at which the start task stopped running. This is + the end time of the most recent run of the start task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start task + command line. 
This property is set only if the start task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. 
If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(StartTaskInformation, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/start_task_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/start_task_information_py3.py new file mode 100644 index 00000000..cb434ab2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/start_task_information_py3.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start task on the compute node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start task started + running. This value is reset every time the task is restarted or retried + (that is, this is the most recent time at which the start task started + running). + :type start_time: datetime + :param end_time: The time at which the start task stopped running. This is + the end time of the most recent run of the start task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start task + command line. This property is set only if the start task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. 
+ :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, state, start_time, retry_count: int, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(StartTaskInformation, self).__init__(**kwargs) + self.state = state + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_08_01/models/start_task_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/start_task_py3.py new file mode 100644 index 00000000..7fb95815 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/start_task_py3.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A task which is run when a compute node joins a pool in the Azure Batch + service, or when the compute node is rebooted or reimaged. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start task runs. 
When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all task environment variables are + mapped into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start task runs. + If omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the task. If the maximum retry count is + -1, the Batch service retries the task without limit. 
+ :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start task to complete successfully (that is, to exit with exit code 0) + before scheduling any tasks on the compute node. If true and the start + task fails on a compute node, the Batch service retries the start task up + to its maximum retry count (maxTaskRetryCount). If the task has still not + completed successfully after all retries, then the Batch service marks the + compute node unusable, and will not schedule tasks to it. This condition + can be detected via the node state and failure info details. If false, the + Batch service will not wait for the start task to complete. In this case, + other tasks can start executing on the compute node while the start task + is still running; and even if the start task fails, new tasks will + continue to be scheduled on the node. The default is false. + :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None: + super(StartTask, self).__init__(**kwargs) + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + 
self.user_identity = user_identity + self.max_task_retry_count = max_task_retry_count + self.wait_for_success = wait_for_success diff --git a/azext/generated/sdk/batch/v2018_08_01/models/subtask_information.py b/azext/generated/sdk/batch/v2018_08_01/models/subtask_information.py new file mode 100644 index 00000000..dbbff704 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/subtask_information.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the compute node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. 
If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. + :type previous_state_transition_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(SubtaskInformation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.node_info = kwargs.get('node_info', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/subtask_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/subtask_information_py3.py new file mode 100644 index 00000000..1399c866 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/subtask_information_py3.py @@ -0,0 +1,97 @@ +# 
class SubtaskInformation(Model):
    """Information about an Azure Batch subtask.

    :param id: The ID of the subtask.
    :type id: int
    :param node_info: Information about the compute node on which the
     subtask ran.
    :type node_info: ~azure.batch.models.ComputeNodeInformation
    :param start_time: The most recent time at which the subtask started
     running (later than the first start if it was restarted or retried).
    :type start_time: datetime
    :param end_time: The time at which the subtask completed; set only once
     the subtask reaches the Completed state.
    :type end_time: datetime
    :param exit_code: The exit code of the program specified on the subtask
     command line; set only in the completed state. The value follows
     whatever convention the application implements, and may be an
     operating system-defined code if the Batch service terminated the
     subtask (timeout, or user termination via the API).
    :type exit_code: int
    :param container_info: Information about the container under which the
     task is executing; set only when the task runs in a container context.
    :type container_info:
     ~azure.batch.models.TaskContainerExecutionInformation
    :param failure_info: Information describing the task failure, if any;
     set only for a completed task that encountered a failure.
    :type failure_info: ~azure.batch.models.TaskFailureInformation
    :param state: The current state of the subtask. Possible values
     include: 'preparing', 'running', 'completed'
    :type state: str or ~azure.batch.models.SubtaskState
    :param state_transition_time: The time at which the subtask entered its
     current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the subtask; not set while
     the subtask is in its initial running state. Possible values include:
     'preparing', 'running', 'completed'
    :type previous_state: str or ~azure.batch.models.SubtaskState
    :param previous_state_transition_time: The time at which the subtask
     entered its previous state; not set in the initial running state.
    :type previous_state_transition_time: datetime
    :param result: The result of the task execution; if 'failed', details
     are in the failureInfo property. Possible values include: 'success',
     'failure'
    :type result: str or ~azure.batch.models.TaskExecutionResult
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'exit_code': {'key': 'exitCode', 'type': 'int'},
        'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'},
        'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'},
        'state': {'key': 'state', 'type': 'SubtaskState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'SubtaskState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'result': {'key': 'result', 'type': 'TaskExecutionResult'},
    }

    def __init__(self, *, id: int=None, node_info=None, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, result=None, **kwargs) -> None:
        super(SubtaskInformation, self).__init__(**kwargs)
        # Identity and placement.
        self.id = id
        self.node_info = node_info
        # Execution timeline.
        self.start_time = start_time
        self.end_time = end_time
        # Outcome details.
        self.exit_code = exit_code
        self.container_info = container_info
        self.failure_info = failure_info
        self.result = result
        # State-machine tracking.
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
class TaskAddCollectionOptions(Model):
    """Additional parameters for add_collection operation.

    :param timeout: The maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity as an
     undecorated GUID (no curly braces), e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # Empty 'key' entries: these are header/query parameters, not body fields.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(TaskAddCollectionOptions, self).__init__(**kwargs)
        pick = kwargs.get
        self.timeout = pick('timeout', 30)
        self.client_request_id = pick('client_request_id', None)
        self.return_client_request_id = pick('return_client_request_id', False)
        self.ocp_date = pick('ocp_date', None)
class TaskAddCollectionOptions(Model):
    """Additional parameters for add_collection operation.

    :param timeout: The maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity as an
     undecorated GUID (no curly braces), e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # Empty 'key' entries: these are header/query parameters, not body fields.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(TaskAddCollectionOptions, self).__init__(**kwargs)
        # Request-tracing headers.
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Server-side processing cap.
        self.timeout = timeout
class TaskAddCollectionParameter(Model):
    """A collection of Azure Batch tasks to add.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The collection of tasks to add. The maximum
     count of tasks is 100, and the total serialized size of the collection
     must be less than 1MB; if it is larger (for example if each task has
     hundreds of resource files or environment variables), the request
     fails with code 'RequestBodyTooLarge' and should be retried with
     fewer tasks.
    :type value: list[~azure.batch.models.TaskAddParameter]
    """

    _validation = {
        'value': {'required': True, 'max_items': 100},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TaskAddParameter]'},
    }

    def __init__(self, **kwargs):
        super(TaskAddCollectionParameter, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class TaskAddCollectionParameter(Model):
    """A collection of Azure Batch tasks to add.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. The collection of tasks to add. The maximum
     count of tasks is 100, and the total serialized size of the collection
     must be less than 1MB; if it is larger (for example if each task has
     hundreds of resource files or environment variables), the request
     fails with code 'RequestBodyTooLarge' and should be retried with
     fewer tasks.
    :type value: list[~azure.batch.models.TaskAddParameter]
    """

    _validation = {
        'value': {'required': True, 'max_items': 100},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TaskAddParameter]'},
    }

    def __init__(self, *, value, **kwargs) -> None:
        super(TaskAddCollectionParameter, self).__init__(**kwargs)
        # 'value' is required; validation is enforced by the serializer
        # via the _validation map above.
        self.value = value
class TaskAddCollectionResult(Model):
    """The result of adding a collection of tasks to a job.

    :param value: The results of the add task collection operation.
    :type value: list[~azure.batch.models.TaskAddResult]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TaskAddResult]'},
    }

    def __init__(self, **kwargs):
        super(TaskAddCollectionResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class TaskAddCollectionResult(Model):
    """The result of adding a collection of tasks to a job.

    :param value: The results of the add task collection operation.
    :type value: list[~azure.batch.models.TaskAddResult]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[TaskAddResult]'},
    }

    def __init__(self, *, value=None, **kwargs) -> None:
        super(TaskAddCollectionResult, self).__init__(**kwargs)
        # Per-task outcomes reported by the service; optional in responses.
        self.value = value
class TaskAddOptions(Model):
    """Additional parameters for add operation.

    :param timeout: The maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity as an
     undecorated GUID (no curly braces), e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # Empty 'key' entries: these are header/query parameters, not body fields.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(TaskAddOptions, self).__init__(**kwargs)
        pick = kwargs.get
        self.timeout = pick('timeout', 30)
        self.client_request_id = pick('client_request_id', None)
        self.return_client_request_id = pick('return_client_request_id', False)
        self.ocp_date = pick('ocp_date', None)
class TaskAddOptions(Model):
    """Additional parameters for add operation.

    :param timeout: The maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity as an
     undecorated GUID (no curly braces), e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    """

    # Empty 'key' entries: these are header/query parameters, not body fields.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(TaskAddOptions, self).__init__(**kwargs)
        # Request-tracing headers.
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        # Server-side processing cap.
        self.timeout = timeout
class TaskAddParameter(Model):
    """An Azure Batch task to add.

    Batch will retry tasks when a recovery operation is triggered on a
    compute node (for example, when an unhealthy node is rebooted or a
    node disappears due to host failure). Retries due to recovery
    operations are independent of, and not counted against,
    maxTaskRetryCount; even with maxTaskRetryCount of 0, an internal retry
    may occur. All tasks should therefore be idempotent — able to tolerate
    being interrupted and restarted without corruption or duplicate data —
    and long running tasks should use some form of checkpointing.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A string that uniquely identifies the task within
     the job. It may contain any combination of alphanumeric characters
     including hyphens and underscores, up to 64 characters, and is
     case-preserving and case-insensitive (two IDs in a job may not differ
     only by case).
    :type id: str
    :param display_name: A display name for the task; need not be unique
     and may contain any Unicode characters up to a maximum length of 1024.
    :type display_name: str
    :param command_line: Required. The command line of the task. For
     multi-instance tasks it is executed as the primary task after the
     coordination command line finishes on all subtasks. The command line
     does not run under a shell, so to use shell features such as
     environment variable expansion, invoke the shell explicitly (for
     example "cmd /c MyCommand" on Windows or "/bin/sh -c MyCommand" on
     Linux). File paths should be relative to the task working directory
     or use the Batch provided environment variables
     (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
    :type command_line: str
    :param container_settings: The settings for the container under which
     the task runs. Must be set if (and only if) the pool that will run
     this task has containerConfiguration set. When specified, all
     directories under AZ_BATCH_NODE_ROOT_DIR and all task environment
     variables are mapped into the container, and the command line is
     executed in the container.
    :type container_settings: ~azure.batch.models.TaskContainerSettings
    :param exit_conditions: How the Batch service should respond when the
     task completes.
    :type exit_conditions: ~azure.batch.models.ExitConditions
    :param resource_files: Files the Batch service downloads to the compute
     node before running the command line (for multi-instance tasks, only
     to the node running the primary task). An oversized list fails with
     error code RequestEntityTooLarge; reduce it using .zip files,
     Application Packages, or Docker Containers.
    :type resource_files: list[~azure.batch.models.ResourceFile]
    :param output_files: Files the Batch service uploads from the compute
     node after running the command line (for multi-instance tasks, only
     from the node running the primary task).
    :type output_files: list[~azure.batch.models.OutputFile]
    :param environment_settings: A list of environment variable settings
     for the task.
    :type environment_settings: list[~azure.batch.models.EnvironmentSetting]
    :param affinity_info: A locality hint the Batch service can use to
     select a compute node on which to start the new task.
    :type affinity_info: ~azure.batch.models.AffinityInformation
    :param constraints: The execution constraints for this task. If
     omitted, maxTaskRetryCount defaults to the job's value and
     maxWallClockTime and retentionTime are infinite.
    :type constraints: ~azure.batch.models.TaskConstraints
    :param user_identity: The user identity under which the task runs; if
     omitted, the task runs as a non-administrative user unique to the
     task.
    :type user_identity: ~azure.batch.models.UserIdentity
    :param multi_instance_settings: Indicates that the task is a
     multi-instance task and describes how to run it.
    :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings
    :param depends_on: The tasks this task depends on. The task is not
     scheduled until all dependencies complete successfully; if any fail
     and exhaust their retries, it is never scheduled. If the job does not
     have usesTaskDependencies set to true and this element is present,
     the request fails with error code TaskDependenciesNotSpecifiedOnJob.
    :type depends_on: ~azure.batch.models.TaskDependencies
    :param application_package_references: Application packages the Batch
     service deploys to the compute node before running the command line.
     Packages are deployed to a shared directory (not the task working
     directory), so an up-to-date copy already on the node is reused. If a
     referenced package cannot be installed (deleted, or download failed),
     the task fails.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param authentication_token_settings: Settings for an authentication
     token the task can use for Batch service operations. When set, the
     task receives a token via the AZ_BATCH_AUTHENTICATION_TOKEN
     environment variable that authenticates Batch operations (e.g. adding
     tasks to the job, or checking job/task status) without requiring an
     account access key; the allowed operations depend on the settings.
    :type authentication_token_settings:
     ~azure.batch.models.AuthenticationTokenSettings
    """

    _validation = {
        'id': {'required': True},
        'command_line': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'command_line': {'key': 'commandLine', 'type': 'str'},
        'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
        'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
        'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
        'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
        'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
        'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
        'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
        'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
        'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
        'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
    }

    def __init__(self, **kwargs):
        super(TaskAddParameter, self).__init__(**kwargs)
        fetch = kwargs.get
        # Identity and command (the only two required fields).
        self.id = fetch('id')
        self.display_name = fetch('display_name')
        self.command_line = fetch('command_line')
        # Execution environment.
        self.container_settings = fetch('container_settings')
        self.environment_settings = fetch('environment_settings')
        self.user_identity = fetch('user_identity')
        # Data movement.
        self.resource_files = fetch('resource_files')
        self.output_files = fetch('output_files')
        self.application_package_references = fetch('application_package_references')
        # Scheduling and lifecycle.
        self.exit_conditions = fetch('exit_conditions')
        self.affinity_info = fetch('affinity_info')
        self.constraints = fetch('constraints')
        self.multi_instance_settings = fetch('multi_instance_settings')
        self.depends_on = fetch('depends_on')
        self.authentication_token_settings = fetch('authentication_token_settings')
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch task to add. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the task within the + job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a job that differ only by case). + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the task. For + multi-instance tasks, the command line is executed as the primary task, + after the primary task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. 
If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + task runs. If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
+ :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the job, and the maxWallClockTime and + retentionTime are infinite. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The tasks that this task depends on. This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. If the job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. 
+ :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(TaskAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.exit_conditions = exit_conditions + self.resource_files = 
resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.multi_instance_settings = multi_instance_settings + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_add_result.py b/azext/generated/sdk/batch/v2018_08_01/models/task_add_result.py new file mode 100644 index 00000000..7528e30d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_add_result.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single task added as part of an add task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the task, if the task was successfully added. + You can use this to detect whether the task has changed between requests. 
+ In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param location: The URL of the task, if the task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the task. + :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, **kwargs): + super(TaskAddResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.task_id = kwargs.get('task_id', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.location = kwargs.get('location', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_add_result_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_add_result_py3.py new file mode 100644 index 00000000..7add806b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_add_result_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single task added as part of an add task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the task, if the task was successfully added. + You can use this to detect whether the task has changed between requests. + In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param location: The URL of the task, if the task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the task. 
+ :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None: + super(TaskAddResult, self).__init__(**kwargs) + self.status = status + self.task_id = task_id + self.e_tag = e_tag + self.last_modified = last_modified + self.location = location + self.error = error diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_constraints.py b/azext/generated/sdk/batch/v2018_08_01/models/task_constraints.py new file mode 100644 index 00000000..22898fad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_constraints.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a task. + + :param max_wall_clock_time: The maximum elapsed time that the task may + run, measured from the time the task starts. If the task does not complete + within the time limit, the Batch service terminates it. 
If this is not + specified, there is no time limit on how long the task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory on + the compute node where it ran, from the time it completes execution. After + this time, the Batch service may delete the task directory and all its + contents. The default is infinite, i.e. the task directory will be + retained until the compute node is removed or reimaged. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + task executable due to a nonzero exit code. The Batch service will try the + task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the task after the first attempt. If the maximum retry + count is -1, the Batch service retries the task without limit. Resource + files and application packages are only downloaded again if the task is + retried on a new compute node. 
+ :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_constraints_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_constraints_py3.py new file mode 100644 index 00000000..2070d096 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_constraints_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a task. + + :param max_wall_clock_time: The maximum elapsed time that the task may + run, measured from the time the task starts. If the task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory on + the compute node where it ran, from the time it completes execution. 
After + this time, the Batch service may delete the task directory and all its + contents. The default is infinite, i.e. the task directory will be + retained until the compute node is removed or reimaged. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + task executable due to a nonzero exit code. The Batch service will try the + task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the task after the first attempt. If the maximum retry + count is -1, the Batch service retries the task without limit. Resource + files and application packages are only downloaded again if the task is + retried on a new compute node. 
+ :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information.py new file mode 100644 index 00000000..6ade9177 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. 
This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = kwargs.get('container_id', None) + self.state = kwargs.get('state', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information_py3.py new file mode 100644 index 00000000..44f9e7e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_container_execution_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. 
This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, *, container_id: str=None, state: str=None, error: str=None, **kwargs) -> None: + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = container_id + self.state = state + self.error = error diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings.py b/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings.py new file mode 100644 index 00000000..ac1a56f8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The image to use to create the container in + which the task will run. 
This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container image. + This setting can be omitted if it was already provided at pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + } + + def __init__(self, **kwargs): + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = kwargs.get('container_run_options', None) + self.image_name = kwargs.get('image_name', None) + self.registry = kwargs.get('registry', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings_py3.py new file mode 100644 index 00000000..dabd7b99 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_container_settings_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a task. + + All required parameters must be populated in order to send to Azure. 
+ + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The image to use to create the container in + which the task will run. This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container image. + This setting can be omitted if it was already provided at pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + } + + def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, **kwargs) -> None: + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = container_run_options + self.image_name = image_name + self.registry = registry diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_counts.py b/azext/generated/sdk/batch/v2018_08_01/models/task_counts.py new file mode 100644 index 00000000..057f2d7a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_counts.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The task counts for a job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of tasks in the active state. + :type active: int + :param running: Required. The number of tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of tasks which succeeded. A task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of tasks which failed. A task fails if + its result (found in the executionInfo property) is 'failure'. + :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskCounts, self).__init__(**kwargs) + self.active = kwargs.get('active', None) + self.running = kwargs.get('running', None) + self.completed = kwargs.get('completed', None) + self.succeeded = kwargs.get('succeeded', None) + self.failed = kwargs.get('failed', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_counts_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_counts_py3.py new file mode 100644 index 00000000..623c7dd8 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_08_01/models/task_counts_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The task counts for a job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of tasks in the active state. + :type active: int + :param running: Required. The number of tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of tasks which succeeded. A task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of tasks which failed. A task fails if + its result (found in the executionInfo property) is 'failure'. 
+ :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: + super(TaskCounts, self).__init__(**kwargs) + self.active = active + self.running = running + self.completed = completed + self.succeeded = succeeded + self.failed = failed diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options.py new file mode 100644 index 00000000..2daf7608 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options_py3.py new file mode 100644 index 00000000..4b836c65 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies.py b/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies.py new file mode 100644 index 00000000..f5bfb8c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a task. 
Any task that is explicitly specified + or within a dependency range must complete before the dependant task will + be scheduled. + + :param task_ids: The list of task IDs that this task depends on. All tasks + in this list must complete successfully before the dependent task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of task ID ranges that this task depends + on. All tasks in all ranges must complete successfully before the + dependent task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, **kwargs): + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = kwargs.get('task_ids', None) + self.task_id_ranges = kwargs.get('task_id_ranges', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies_py3.py new file mode 100644 index 00000000..133f3268 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_dependencies_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a task. Any task that is explicitly specified + or within a dependency range must complete before the dependant task will + be scheduled. + + :param task_ids: The list of task IDs that this task depends on. All tasks + in this list must complete successfully before the dependent task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of task ID ranges that this task depends + on. All tasks in all ranges must complete successfully before the + dependent task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None: + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = task_ids + self.task_id_ranges = task_id_ranges diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information.py b/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information.py new file mode 100644 index 00000000..97e313dd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the task started running. 'Running' + corresponds to the running state, so if the task specifies resource files + or application packages, then the start time reflects the time at which + the task started downloading or deploying these. If the task has been + restarted or retried, this is the most recent time at which the task + started running. This property is present only for tasks that are in the + running or completed state. + :type start_time: datetime + :param end_time: The time at which the task completed. This property is + set only if the task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the task + command line. This property is set only if the task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the task has been + requeued by the Batch service as the result of a user request. When the + user removes nodes from a pool (by resizing/shrinking the pool) or when + the job is being disabled, the user can specify that running tasks on the + nodes be requeued for execution. This count tracks how many times the task + has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.requeue_count = kwargs.get('requeue_count', None) + self.last_requeue_time = kwargs.get('last_requeue_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information_py3.py new file mode 100644 index 00000000..330bb6f4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_execution_information_py3.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the task started running. 'Running' + corresponds to the running state, so if the task specifies resource files + or application packages, then the start time reflects the time at which + the task started downloading or deploying these. If the task has been + restarted or retried, this is the most recent time at which the task + started running. This property is present only for tasks that are in the + running or completed state. + :type start_time: datetime + :param end_time: The time at which the task completed. This property is + set only if the task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the task + command line. This property is set only if the task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the task has been + requeued by the Batch service as the result of a user request. When the + user removes nodes from a pool (by resizing/shrinking the pool) or when + the job is being disabled, the user can specify that running tasks on the + nodes be requeued for execution. This count tracks how many times the task + has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, retry_count: int, requeue_count: int, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, last_requeue_time=None, result=None, **kwargs) -> None: + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.requeue_count = requeue_count + self.last_requeue_time = last_requeue_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information.py b/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information.py new file mode 100644 index 00000000..fc6a45fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the task error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information_py3.py new file mode 100644 index 00000000..b5eece45 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_failure_information_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the task error. Codes are invariant and are + intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_get_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_get_options.py new file mode 100644 index 00000000..08c1fd8a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_get_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_get_options_py3.py new file mode 100644 index 00000000..68699028 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_id_range.py b/azext/generated/sdk/batch/v2018_08_01/models/task_id_range.py new file mode 100644 index 00000000..db30d858 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_id_range.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of task IDs that a task can depend on. All tasks with IDs in the + range must complete successfully before the dependent task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first task ID in the range. + :type start: int + :param end: Required. The last task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskIdRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_id_range_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_id_range_py3.py new file mode 100644 index 00000000..446ed8ee --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_id_range_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of task IDs that a task can depend on. All tasks with IDs in the + range must complete successfully before the dependent task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first task ID in the range. + :type start: int + :param end: Required. The last task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(TaskIdRange, self).__init__(**kwargs) + self.start = start + self.end = end diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_information.py b/azext/generated/sdk/batch/v2018_08_01/models/task_information.py new file mode 100644 index 00000000..6e8ec0d1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_information.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a task running on a compute node. 
+ + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the task. + :type task_url: str + :param job_id: The ID of the job to which the task belongs. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param subtask_id: The ID of the subtask if the task is a multi-instance + task. + :type subtask_id: int + :param task_state: Required. The current state of the task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(TaskInformation, self).__init__(**kwargs) + self.task_url = kwargs.get('task_url', None) + self.job_id = kwargs.get('job_id', None) + self.task_id = kwargs.get('task_id', None) + self.subtask_id = kwargs.get('subtask_id', None) + self.task_state = kwargs.get('task_state', None) + self.execution_info = kwargs.get('execution_info', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_information_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_information_py3.py new file mode 100644 index 00000000..9406cba4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_information_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the task. + :type task_url: str + :param job_id: The ID of the job to which the task belongs. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param subtask_id: The ID of the subtask if the task is a multi-instance + task. + :type subtask_id: int + :param task_state: Required. The current state of the task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the task. 
+ :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, *, task_state, task_url: str=None, job_id: str=None, task_id: str=None, subtask_id: int=None, execution_info=None, **kwargs) -> None: + super(TaskInformation, self).__init__(**kwargs) + self.task_url = task_url + self.job_id = job_id + self.task_id = task_id + self.subtask_id = subtask_id + self.task_state = task_state + self.execution_info = execution_info diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_list_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_list_options.py new file mode 100644 index 00000000..08c9cb00 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_list_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_list_options_py3.py new file mode 100644 index 00000000..bb02726e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options.py new file mode 100644 index 00000000..8157cee2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. 
+ :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options_py3.py new file mode 100644 index 00000000..b8810800 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_list_subtasks_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options.py new file mode 100644 index 00000000..fe074611 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options_py3.py new file mode 100644 index 00000000..bd39d6c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_reactivate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy.py b/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy.py new file mode 100644 index 00000000..2f121acb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how tasks should be distributed across compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How tasks are distributed across compute + nodes in a pool. Possible values include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, **kwargs): + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = kwargs.get('node_fill_type', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy_py3.py new file mode 100644 index 00000000..f3ff79a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_scheduling_policy_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how tasks should be distributed across compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How tasks are distributed across compute + nodes in a pool. 
Possible values include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, *, node_fill_type, **kwargs) -> None: + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = node_fill_type diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_statistics.py b/azext/generated/sdk/batch/v2018_08_01/models/task_statistics.py new file mode 100644 index 00000000..b5f877fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_statistics.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by the task. 
+ :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the task. + The wall clock time is the elapsed time from when the task started running + on a compute node to when it finished (or to the last time the statistics + were updated, if the task had not finished by then). If the task was + retried, this includes the wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by the task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the task. The wait time + for a task is defined as the elapsed time between the creation of the task + and the start of task execution. (If the task is retried due to failures, + the wait time is the time to the most recent task execution.). 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(TaskStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_statistics_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_statistics_py3.py new file mode 100644 index 
00000000..42de1dba --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_statistics_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the task. + The wall clock time is the elapsed time from when the task started running + on a compute node to when it finished (or to the last time the statistics + were updated, if the task had not finished by then). If the task was + retried, this includes the wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. 
The total number of disk read operations made + by the task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the task. The wait time + for a task is defined as the elapsed time between the creation of the task + and the start of task execution. (If the task is retried due to failures, + the wait time is the time to the most recent task execution.). + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, 
write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None: + super(TaskStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options.py new file mode 100644 index 00000000..1908a9da --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options_py3.py new file mode 100644 index 00000000..d967db3a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_update_options.py b/azext/generated/sdk/batch/v2018_08_01/models/task_update_options.py new file mode 100644 index 00000000..32e1ad82 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_update_options_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_update_options_py3.py new file mode 100644 index 00000000..2a20ddf5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter.py b/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter.py new file mode 100644 index 00000000..84246a43 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a task. 
+ + :param constraints: Constraints that apply to this task. If omitted, the + task is given the default constraints. For multi-instance tasks, updating + the retention time applies only to the primary task and not subtasks. + :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = kwargs.get('constraints', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter_py3.py new file mode 100644 index 00000000..71594e62 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/task_update_parameter_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a task. + + :param constraints: Constraints that apply to this task. If omitted, the + task is given the default constraints. For multi-instance tasks, updating + the retention time applies only to the primary task and not subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, *, constraints=None, **kwargs) -> None: + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = constraints diff --git a/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration.py new file mode 100644 index 00000000..1f96d326 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. 
This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. + :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = kwargs.get('container_url', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration_py3.py new file mode 100644 index 00000000..875beb60 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_configuration_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. 
+ :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, container_url: str, start_time, end_time=None, **kwargs) -> None: + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = container_url + self.start_time = start_time + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result.py b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result.py new file mode 100644 index 00000000..a2d5a0fe --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific compute + node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. 
+ :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = kwargs.get('virtual_directory_name', None) + self.number_of_files_uploaded = kwargs.get('number_of_files_uploaded', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result_py3.py new file mode 100644 index 00000000..f9547bc2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/upload_batch_service_logs_result_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific compute + node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. 
The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. + :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None: + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = virtual_directory_name + self.number_of_files_uploaded = number_of_files_uploaded diff --git a/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics.py b/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics.py new file mode 100644 index 00000000..08d709ad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to pool usage information. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. 
+ :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated compute node cores being part of the pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.dedicated_core_time = kwargs.get('dedicated_core_time', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics_py3.py new file mode 100644 index 00000000..9fafd25d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/usage_statistics_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to pool usage information. 
+ + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated compute node cores being part of the pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None: + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.dedicated_core_time = dedicated_core_time diff --git a/azext/generated/sdk/batch/v2018_08_01/models/user_account.py b/azext/generated/sdk/batch/v2018_08_01/models/user_account.py new file mode 100644 index 00000000..e630e5ec --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/user_account.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute tasks on an Azure Batch + node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user account. + :type name: str + :param password: Required. The password for the user account. + :type password: str + :param elevation_level: The elevation level of the user account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user account. This property is ignored if specified on a Windows pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + } + + def __init__(self, **kwargs): + super(UserAccount, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.password = kwargs.get('password', None) + self.elevation_level = kwargs.get('elevation_level', None) + self.linux_user_configuration = kwargs.get('linux_user_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/user_account_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/user_account_py3.py new file mode 100644 index 00000000..33a2d369 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/user_account_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute tasks on an Azure Batch + node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user account. + :type name: str + :param password: Required. The password for the user account. + :type password: str + :param elevation_level: The elevation level of the user account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user account. This property is ignored if specified on a Windows pool. + If not specified, the user is created with the default options. 
+ :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + } + + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, **kwargs) -> None: + super(UserAccount, self).__init__(**kwargs) + self.name = name + self.password = password + self.elevation_level = elevation_level + self.linux_user_configuration = linux_user_configuration diff --git a/azext/generated/sdk/batch/v2018_08_01/models/user_identity.py b/azext/generated/sdk/batch/v2018_08_01/models/user_identity.py new file mode 100644 index 00000000..b75dfd73 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/user_identity.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the task is run. + + Specify either the userName or autoUser property, but not both. On + CloudServiceConfiguration pools, this user is logged in with the + INTERACTIVE flag. On Windows VirtualMachineConfiguration pools, this user + is logged in with the BATCH flag. 
+ + :param user_name: The name of the user identity under which the task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. + :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, **kwargs): + super(UserIdentity, self).__init__(**kwargs) + self.user_name = kwargs.get('user_name', None) + self.auto_user = kwargs.get('auto_user', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/user_identity_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/user_identity_py3.py new file mode 100644 index 00000000..e566f58c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/user_identity_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the task is run. + + Specify either the userName or autoUser property, but not both. On + CloudServiceConfiguration pools, this user is logged in with the + INTERACTIVE flag. On Windows VirtualMachineConfiguration pools, this user + is logged in with the BATCH flag. 
+ + :param user_name: The name of the user identity under which the task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. + :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: + super(UserIdentity, self).__init__(**kwargs) + self.user_name = user_name + self.auto_user = auto_user diff --git a/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration.py new file mode 100644 index 00000000..ebf7f7d6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for compute nodes in a pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace image or the custom Virtual Machine image to use. 
+ :type image_reference: ~azure.batch.models.ImageReference + :param os_disk: Settings for the operating system disk of the Virtual + Machine. + :type os_disk: ~azure.batch.models.OSDisk + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be + provisioned on compute nodes in the pool. The Batch node agent is a + program that runs on each node in the pool, and provides the + command-and-control interface between the node and the Batch service. + There are different implementations of the node agent, known as SKUs, for + different operating systems. You must specify a node agent SKU which + matches the selected image reference. To get the list of supported node + agent SKUs along with their list of verified image references, see the + 'List supported node agent SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + or osDisk property specifies a Linux OS image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + compute nodes in the pool. This property must be specified if the compute + nodes in the pool need to have empty data disks attached to them. This + cannot be updated. Each node gets its own disk (the disk is not a file + share). Existing disks cannot be attached, each attached disk is empty. + When the node is removed from the pool, the disk and all data associated + with it is also deleted. The disk is not formatted after being attached, + it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. 
+ :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the nodes which will be deployed. If omitted, no + on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the pool. + If specified, setup is performed on each node in the pool to allow tasks + to run in containers. All regular tasks and job manager tasks run on this + pool must specify the containerSettings property, and all other tasks may + specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, **kwargs): + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = kwargs.get('image_reference', None) + self.os_disk = kwargs.get('os_disk', None) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.windows_configuration = kwargs.get('windows_configuration', None) + self.data_disks = kwargs.get('data_disks', None) + 
self.license_type = kwargs.get('license_type', None) + self.container_configuration = kwargs.get('container_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration_py3.py new file mode 100644 index 00000000..c38af5c7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/virtual_machine_configuration_py3.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for compute nodes in a pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace image or the custom Virtual Machine image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param os_disk: Settings for the operating system disk of the Virtual + Machine. + :type os_disk: ~azure.batch.models.OSDisk + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be + provisioned on compute nodes in the pool. The Batch node agent is a + program that runs on each node in the pool, and provides the + command-and-control interface between the node and the Batch service. + There are different implementations of the node agent, known as SKUs, for + different operating systems. 
You must specify a node agent SKU which + matches the selected image reference. To get the list of supported node + agent SKUs along with their list of verified image references, see the + 'List supported node agent SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + or osDisk property specifies a Linux OS image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + compute nodes in the pool. This property must be specified if the compute + nodes in the pool need to have empty data disks attached to them. This + cannot be updated. Each node gets its own disk (the disk is not a file + share). Existing disks cannot be attached, each attached disk is empty. + When the node is removed from the pool, the disk and all data associated + with it is also deleted. The disk is not formatted after being attached, + it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the nodes which will be deployed. If omitted, no + on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. 
+ :type license_type: str + :param container_configuration: The container configuration for the pool. + If specified, setup is performed on each node in the pool to allow tasks + to run in containers. All regular tasks and job manager tasks run on this + pool must specify the containerSettings property, and all other tasks may + specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, *, image_reference, node_agent_sku_id: str, os_disk=None, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = image_reference + self.os_disk = os_disk + self.node_agent_sku_id = node_agent_sku_id + self.windows_configuration = windows_configuration + self.data_disks = data_disks + self.license_type = license_type + self.container_configuration = container_configuration diff --git a/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration.py b/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration.py new file mode 100644 index 00000000..6b27533d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) diff --git a/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration_py3.py b/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration_py3.py new file mode 100644 index 00000000..40a4aedf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/models/windows_configuration_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = enable_automatic_updates diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/__init__.py b/azext/generated/sdk/batch/v2018_08_01/operations/__init__.py new file mode 100644 index 00000000..5b1c54cc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .application_operations import ApplicationOperations +from .pool_operations import PoolOperations +from .account_operations import AccountOperations +from .job_operations import JobOperations +from .certificate_operations import CertificateOperations +from .file_operations import FileOperations +from .job_schedule_operations import JobScheduleOperations +from .task_operations import TaskOperations +from .compute_node_operations import ComputeNodeOperations + +__all__ = [ + 'ApplicationOperations', + 'PoolOperations', + 'AccountOperations', + 'JobOperations', + 'CertificateOperations', + 'FileOperations', + 'JobScheduleOperations', + 'TaskOperations', + 'ComputeNodeOperations', +] diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/account_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/account_operations.py new file mode 100644 index 00000000..49aa0879 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/account_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class AccountOperations(object): + """AccountOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. 
Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def list_node_agent_skus( + self, account_list_node_agent_skus_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all node agent SKUs supported by the Azure Batch service. + + :param account_list_node_agent_skus_options: Additional parameters for + the operation + :type account_list_node_agent_skus_options: + ~azure.batch.models.AccountListNodeAgentSkusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeAgentSku + :rtype: + ~azure.batch.models.NodeAgentSkuPaged[~azure.batch.models.NodeAgentSku] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_node_agent_skus_options is not None: + filter = account_list_node_agent_skus_options.filter + max_results = None + if account_list_node_agent_skus_options is not None: + max_results = account_list_node_agent_skus_options.max_results + timeout = None + if account_list_node_agent_skus_options is not None: + timeout = account_list_node_agent_skus_options.timeout + client_request_id = None + if account_list_node_agent_skus_options is not None: + client_request_id = account_list_node_agent_skus_options.client_request_id + return_client_request_id = None + if account_list_node_agent_skus_options is not None: + return_client_request_id = account_list_node_agent_skus_options.return_client_request_id + ocp_date = None + if account_list_node_agent_skus_options is not None: + ocp_date = account_list_node_agent_skus_options.ocp_date + + def internal_paging(next_link=None, 
raw=False): + + if not next_link: + # Construct URL + url = self.list_node_agent_skus.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_node_agent_skus.metadata = {'url': '/nodeagentskus'} + + def list_pool_node_counts( + self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the number of nodes in each state, grouped by pool. + + :param account_list_pool_node_counts_options: Additional parameters + for the operation + :type account_list_pool_node_counts_options: + ~azure.batch.models.AccountListPoolNodeCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of PoolNodeCounts + :rtype: + ~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_pool_node_counts_options is not None: + filter = account_list_pool_node_counts_options.filter + max_results = None + if account_list_pool_node_counts_options is not None: + max_results = account_list_pool_node_counts_options.max_results + timeout = None + if account_list_pool_node_counts_options is not None: + timeout = account_list_pool_node_counts_options.timeout + client_request_id = None + if account_list_pool_node_counts_options is not None: + client_request_id = account_list_pool_node_counts_options.client_request_id + return_client_request_id = None + if account_list_pool_node_counts_options is not None: + return_client_request_id = account_list_pool_node_counts_options.return_client_request_id + ocp_date = None + if account_list_pool_node_counts_options is not None: + ocp_date = account_list_pool_node_counts_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_pool_node_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers 
+ header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_pool_node_counts.metadata = {'url': '/nodecounts'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/application_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/application_operations.py new file mode 100644 index 00000000..234d4b9e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/application_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ApplicationOperations(object): + """ApplicationOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def list( + self, application_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the applications available in the specified account. + + This operation returns only applications and versions that are + available for use on compute nodes; that is, that can be used in an + application package reference. For administrator information about + applications and versions that are not yet available to compute nodes, + use the Azure portal or the Azure Resource Manager API. + + :param application_list_options: Additional parameters for the + operation + :type application_list_options: + ~azure.batch.models.ApplicationListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of ApplicationSummary + :rtype: + ~azure.batch.models.ApplicationSummaryPaged[~azure.batch.models.ApplicationSummary] + :raises: + :class:`BatchErrorException` + """ + max_results = None + if application_list_options is not None: + max_results = application_list_options.max_results + timeout = None + if application_list_options is not None: + timeout = application_list_options.timeout + client_request_id = None + if application_list_options is not None: + client_request_id = application_list_options.client_request_id + return_client_request_id = None + if application_list_options is not None: + return_client_request_id = application_list_options.return_client_request_id + ocp_date = None + if application_list_options is not None: + ocp_date = application_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/applications'} + + def get( + self, application_id, application_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified application. + + This operation returns only applications and versions that are + available for use on compute nodes; that is, that can be used in an + application package reference. For administrator information about + applications and versions that are not yet available to compute nodes, + use the Azure portal or the Azure Resource Manager API. + + :param application_id: The ID of the application. 
+ :type application_id: str + :param application_get_options: Additional parameters for the + operation + :type application_get_options: + ~azure.batch.models.ApplicationGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationSummary or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ApplicationSummary or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if application_get_options is not None: + timeout = application_get_options.timeout + client_request_id = None + if application_get_options is not None: + client_request_id = application_get_options.client_request_id + return_client_request_id = None + if application_get_options is not None: + return_client_request_id = application_get_options.return_client_request_id + ocp_date = None + if application_get_options is not None: + ocp_date = application_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'applicationId': self._serialize.url("application_id", application_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ApplicationSummary', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/applications/{applicationId}'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/certificate_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/certificate_operations.py new file mode 100644 index 00000000..774d29af --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/certificate_operations.py @@ -0,0 +1,515 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class CertificateOperations(object): + """CertificateOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def add( + self, certificate, certificate_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a certificate to the specified account. + + :param certificate: The certificate to be added. + :type certificate: ~azure.batch.models.CertificateAddParameter + :param certificate_add_options: Additional parameters for the + operation + :type certificate_add_options: + ~azure.batch.models.CertificateAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_add_options is not None: + timeout = certificate_add_options.timeout + client_request_id = None + if certificate_add_options is not None: + client_request_id = certificate_add_options.client_request_id + return_client_request_id = None + if certificate_add_options is not None: + return_client_request_id = certificate_add_options.return_client_request_id + ocp_date = None + if certificate_add_options is not None: + ocp_date = certificate_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(certificate, 'CertificateAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/certificates'} + + def list( + self, certificate_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the certificates that have been added to the specified + account. + + :param certificate_list_options: Additional parameters for the + operation + :type certificate_list_options: + ~azure.batch.models.CertificateListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Certificate + :rtype: + ~azure.batch.models.CertificatePaged[~azure.batch.models.Certificate] + :raises: + :class:`BatchErrorException` + """ + filter = None + if certificate_list_options is not None: + filter = certificate_list_options.filter + select = None + if certificate_list_options is not None: + select = certificate_list_options.select + max_results = None + if certificate_list_options is not None: + max_results = certificate_list_options.max_results + timeout = None + if certificate_list_options is not None: + timeout = certificate_list_options.timeout + client_request_id = None + if certificate_list_options is not None: + client_request_id = certificate_list_options.client_request_id + return_client_request_id = None + if certificate_list_options is not None: + return_client_request_id = certificate_list_options.return_client_request_id + ocp_date = None + if certificate_list_options is not None: + ocp_date = certificate_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = 
{} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/certificates'} + + def cancel_deletion( + self, thumbprint_algorithm, thumbprint, certificate_cancel_deletion_options=None, custom_headers=None, raw=False, **operation_config): + """Cancels a failed deletion of a certificate from the specified account. + + If you try to delete a certificate that is being used by a pool or + compute node, the status of the certificate changes to deleteFailed. 
If + you decide that you want to continue using the certificate, you can use + this operation to set the status of the certificate back to active. If + you intend to delete the certificate, you do not need to run this + operation after the deletion failed. You must make sure that the + certificate is not being used by any resources, and then you can try + again to delete the certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate being deleted. + :type thumbprint: str + :param certificate_cancel_deletion_options: Additional parameters for + the operation + :type certificate_cancel_deletion_options: + ~azure.batch.models.CertificateCancelDeletionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_cancel_deletion_options is not None: + timeout = certificate_cancel_deletion_options.timeout + client_request_id = None + if certificate_cancel_deletion_options is not None: + client_request_id = certificate_cancel_deletion_options.client_request_id + return_client_request_id = None + if certificate_cancel_deletion_options is not None: + return_client_request_id = certificate_cancel_deletion_options.return_client_request_id + ocp_date = None + if certificate_cancel_deletion_options is not None: + ocp_date = certificate_cancel_deletion_options.ocp_date + + # Construct URL + url = self.cancel_deletion.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if 
return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + cancel_deletion.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete'} + + def delete( + self, thumbprint_algorithm, thumbprint, certificate_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a certificate from the specified account. + + You cannot delete a certificate if a resource (pool or compute node) is + using it. Before you can delete a certificate, you must therefore make + sure that the certificate is not associated with any existing pools, + the certificate is not installed on any compute nodes (even if you + remove a certificate from a pool, it is not removed from existing + compute nodes in that pool until they restart), and no running tasks + depend on the certificate. If you try to delete a certificate that is + in use, the deletion fails. The certificate status changes to + deleteFailed. You can use Cancel Delete Certificate to set the status + back to active if you decide that you want to continue using the + certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. 
This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate to be deleted. + :type thumbprint: str + :param certificate_delete_options: Additional parameters for the + operation + :type certificate_delete_options: + ~azure.batch.models.CertificateDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_delete_options is not None: + timeout = certificate_delete_options.timeout + client_request_id = None + if certificate_delete_options is not None: + client_request_id = certificate_delete_options.client_request_id + return_client_request_id = None + if certificate_delete_options is not None: + return_client_request_id = certificate_delete_options.return_client_request_id + ocp_date = None + if certificate_delete_options is not None: + ocp_date = certificate_delete_options.ocp_date + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + delete.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} + + def get( + self, thumbprint_algorithm, thumbprint, certificate_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate to get. 
+ :type thumbprint: str + :param certificate_get_options: Additional parameters for the + operation + :type certificate_get_options: + ~azure.batch.models.CertificateGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Certificate or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.Certificate or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if certificate_get_options is not None: + select = certificate_get_options.select + timeout = None + if certificate_get_options is not None: + timeout = certificate_get_options.timeout + client_request_id = None + if certificate_get_options is not None: + client_request_id = certificate_get_options.client_request_id + return_client_request_id = None + if certificate_get_options is not None: + return_client_request_id = certificate_get_options.return_client_request_id + ocp_date = None + if certificate_get_options is not None: + ocp_date = certificate_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + 
header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('Certificate', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/compute_node_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/compute_node_operations.py new file mode 100644 index 00000000..04fd8f96 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_08_01/operations/compute_node_operations.py @@ -0,0 +1,1239 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ComputeNodeOperations(object): + """ComputeNodeOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def add_user( + self, pool_id, node_id, user, compute_node_add_user_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a user account to the specified compute node. + + You can add a user account to a node only when it is in the idle or + running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a + user account. + :type node_id: str + :param user: The user account to be created. 
+ :type user: ~azure.batch.models.ComputeNodeUser + :param compute_node_add_user_options: Additional parameters for the + operation + :type compute_node_add_user_options: + ~azure.batch.models.ComputeNodeAddUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_add_user_options is not None: + timeout = compute_node_add_user_options.timeout + client_request_id = None + if compute_node_add_user_options is not None: + client_request_id = compute_node_add_user_options.client_request_id + return_client_request_id = None + if compute_node_add_user_options is not None: + return_client_request_id = compute_node_add_user_options.return_client_request_id + ocp_date = None + if compute_node_add_user_options is not None: + ocp_date = compute_node_add_user_options.ocp_date + + # Construct URL + url = self.add_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(user, 'ComputeNodeUser') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users'} + + def delete_user( + self, pool_id, node_id, user_name, compute_node_delete_user_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a user account from the specified compute node. + + You can delete a user account from a node only when it is in the idle or + running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a + user account.
+ :type node_id: str + :param user_name: The name of the user account to delete. + :type user_name: str + :param compute_node_delete_user_options: Additional parameters for the + operation + :type compute_node_delete_user_options: + ~azure.batch.models.ComputeNodeDeleteUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_delete_user_options is not None: + timeout = compute_node_delete_user_options.timeout + client_request_id = None + if compute_node_delete_user_options is not None: + client_request_id = compute_node_delete_user_options.client_request_id + return_client_request_id = None + if compute_node_delete_user_options is not None: + return_client_request_id = compute_node_delete_user_options.return_client_request_id + ocp_date = None + if compute_node_delete_user_options is not None: + ocp_date = compute_node_delete_user_options.ocp_date + + # Construct URL + url = self.delete_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def update_user( + self, pool_id, node_id, user_name, node_update_user_parameter, compute_node_update_user_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the password and expiration time of a user account on the + specified compute node. + + This operation replaces of all the updatable properties of the account. + For example, if the expiryTime element is not specified, the current + value is replaced with the default value, not left unmodified. You can + update a user account on a node only when it is in the idle or running + state. + + :param pool_id: The ID of the pool that contains the compute node. 
+ :type pool_id: str + :param node_id: The ID of the machine on which you want to update a + user account. + :type node_id: str + :param user_name: The name of the user account to update. + :type user_name: str + :param node_update_user_parameter: The parameters for the request. + :type node_update_user_parameter: + ~azure.batch.models.NodeUpdateUserParameter + :param compute_node_update_user_options: Additional parameters for the + operation + :type compute_node_update_user_options: + ~azure.batch.models.ComputeNodeUpdateUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_update_user_options is not None: + timeout = compute_node_update_user_options.timeout + client_request_id = None + if compute_node_update_user_options is not None: + client_request_id = compute_node_update_user_options.client_request_id + return_client_request_id = None + if compute_node_update_user_options is not None: + return_client_request_id = compute_node_update_user_options.return_client_request_id + ocp_date = None + if compute_node_update_user_options is not None: + ocp_date = compute_node_update_user_options.ocp_date + + # Construct URL + url = self.update_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_update_user_parameter, 'NodeUpdateUserParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def get( + self, pool_id, node_id, compute_node_get_options=None, custom_headers=None, 
raw=False, **operation_config): + """Gets information about the specified compute node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to get + information about. + :type node_id: str + :param compute_node_get_options: Additional parameters for the + operation + :type compute_node_get_options: + ~azure.batch.models.ComputeNodeGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNode or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ComputeNode or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if compute_node_get_options is not None: + select = compute_node_get_options.select + timeout = None + if compute_node_get_options is not None: + timeout = compute_node_get_options.timeout + client_request_id = None + if compute_node_get_options is not None: + client_request_id = compute_node_get_options.client_request_id + return_client_request_id = None + if compute_node_get_options is not None: + return_client_request_id = compute_node_get_options.return_client_request_id + ocp_date = None + if compute_node_get_options is not None: + ocp_date = compute_node_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNode', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}'} + + def reboot( + self, pool_id, node_id, node_reboot_option=None, compute_node_reboot_options=None, 
custom_headers=None, raw=False, **operation_config): + """Restarts the specified compute node. + + You can restart a node only if it is in an idle or running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to restart. + :type node_id: str + :param node_reboot_option: When to reboot the compute node and what to + do with currently running tasks. The default value is requeue. + Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + :param compute_node_reboot_options: Additional parameters for the + operation + :type compute_node_reboot_options: + ~azure.batch.models.ComputeNodeRebootOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reboot_options is not None: + timeout = compute_node_reboot_options.timeout + client_request_id = None + if compute_node_reboot_options is not None: + client_request_id = compute_node_reboot_options.client_request_id + return_client_request_id = None + if compute_node_reboot_options is not None: + return_client_request_id = compute_node_reboot_options.return_client_request_id + ocp_date = None + if compute_node_reboot_options is not None: + ocp_date = compute_node_reboot_options.ocp_date + node_reboot_parameter = None + if node_reboot_option is not None: + node_reboot_parameter = models.NodeRebootParameter(node_reboot_option=node_reboot_option) + + # Construct URL + url = self.reboot.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not 
None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reboot_parameter is not None: + body_content = self._serialize.body(node_reboot_parameter, 'NodeRebootParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reboot.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reboot'} + + def reimage( + self, pool_id, node_id, node_reimage_option=None, compute_node_reimage_options=None, custom_headers=None, raw=False, **operation_config): + """Reinstalls the operating system on the specified compute node. + + You can reinstall the operating system on a node only if it is in an + idle or running state. This API can be invoked only on pools created + with the cloud service configuration property. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to reimage. + :type node_id: str + :param node_reimage_option: When to reimage the compute node and what + to do with currently running tasks. The default value is requeue.
+ Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + :param compute_node_reimage_options: Additional parameters for the + operation + :type compute_node_reimage_options: + ~azure.batch.models.ComputeNodeReimageOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reimage_options is not None: + timeout = compute_node_reimage_options.timeout + client_request_id = None + if compute_node_reimage_options is not None: + client_request_id = compute_node_reimage_options.client_request_id + return_client_request_id = None + if compute_node_reimage_options is not None: + return_client_request_id = compute_node_reimage_options.return_client_request_id + ocp_date = None + if compute_node_reimage_options is not None: + ocp_date = compute_node_reimage_options.ocp_date + node_reimage_parameter = None + if node_reimage_option is not None: + node_reimage_parameter = models.NodeReimageParameter(node_reimage_option=node_reimage_option) + + # Construct URL + url = self.reimage.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reimage_parameter is not None: + body_content = self._serialize.body(node_reimage_parameter, 'NodeReimageParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reimage.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reimage'} + + def disable_scheduling( + self, pool_id, node_id, node_disable_scheduling_option=None, compute_node_disable_scheduling_options=None, custom_headers=None, raw=False, 
**operation_config): + """Disables task scheduling on the specified compute node. + + You can disable task scheduling on a node only if its current + scheduling state is enabled. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node on which you want to + disable task scheduling. + :type node_id: str + :param node_disable_scheduling_option: What to do with currently + running tasks when disabling task scheduling on the compute node. The + default value is requeue. Possible values include: 'requeue', + 'terminate', 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + :param compute_node_disable_scheduling_options: Additional parameters + for the operation + :type compute_node_disable_scheduling_options: + ~azure.batch.models.ComputeNodeDisableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_disable_scheduling_options is not None: + timeout = compute_node_disable_scheduling_options.timeout + client_request_id = None + if compute_node_disable_scheduling_options is not None: + client_request_id = compute_node_disable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_disable_scheduling_options is not None: + return_client_request_id = compute_node_disable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_disable_scheduling_options is not None: + ocp_date = compute_node_disable_scheduling_options.ocp_date + node_disable_scheduling_parameter = None + if node_disable_scheduling_option is not None: + node_disable_scheduling_parameter = models.NodeDisableSchedulingParameter(node_disable_scheduling_option=node_disable_scheduling_option) + + # Construct URL + url = self.disable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_disable_scheduling_parameter is not None: + body_content = self._serialize.body(node_disable_scheduling_parameter, 'NodeDisableSchedulingParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/disablescheduling'} + + def enable_scheduling( + self, pool_id, node_id, compute_node_enable_scheduling_options=None, custom_headers=None, raw=False, **operation_config): + """Enables task scheduling on the specified compute node. + + You can enable task scheduling on a node only if its current scheduling + state is disabled. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node on which you want to enable + task scheduling. 
+ :type node_id: str + :param compute_node_enable_scheduling_options: Additional parameters + for the operation + :type compute_node_enable_scheduling_options: + ~azure.batch.models.ComputeNodeEnableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_enable_scheduling_options is not None: + timeout = compute_node_enable_scheduling_options.timeout + client_request_id = None + if compute_node_enable_scheduling_options is not None: + client_request_id = compute_node_enable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_enable_scheduling_options is not None: + return_client_request_id = compute_node_enable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_enable_scheduling_options is not None: + ocp_date = compute_node_enable_scheduling_options.ocp_date + + # Construct URL + url = self.enable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = 
str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/enablescheduling'} + + def get_remote_login_settings( + self, pool_id, node_id, compute_node_get_remote_login_settings_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the settings required for remote login to a compute node. + + Before you can remotely login to a node using the remote login + settings, you must create a user account on the node. This API can be + invoked only on pools created with the virtual machine configuration + property. For pools created with a cloud service configuration, see the + GetRemoteDesktop API. + + :param pool_id: The ID of the pool that contains the compute node. 
+ :type pool_id: str + :param node_id: The ID of the compute node for which to obtain the + remote login settings. + :type node_id: str + :param compute_node_get_remote_login_settings_options: Additional + parameters for the operation + :type compute_node_get_remote_login_settings_options: + ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNodeGetRemoteLoginSettingsResult or ClientRawResponse + if raw=true + :rtype: ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_login_settings_options is not None: + timeout = compute_node_get_remote_login_settings_options.timeout + client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + client_request_id = compute_node_get_remote_login_settings_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + return_client_request_id = compute_node_get_remote_login_settings_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_login_settings_options is not None: + ocp_date = compute_node_get_remote_login_settings_options.ocp_date + + # Construct URL + url = self.get_remote_login_settings.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNodeGetRemoteLoginSettingsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_login_settings.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/remoteloginsettings'} + + def get_remote_desktop( + self, pool_id, node_id, 
compute_node_get_remote_desktop_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Gets the Remote Desktop Protocol file for the specified compute node. + + Before you can access a node by using the RDP file, you must create a + user account on the node. This API can only be invoked on pools created + with a cloud service configuration. For pools created with a virtual + machine configuration, see the GetRemoteLoginSettings API. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node for which you want to get + the Remote Desktop Protocol file. + :type node_id: str + :param compute_node_get_remote_desktop_options: Additional parameters + for the operation + :type compute_node_get_remote_desktop_options: + ~azure.batch.models.ComputeNodeGetRemoteDesktopOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_desktop_options is not None: + timeout = compute_node_get_remote_desktop_options.timeout + client_request_id = None + if compute_node_get_remote_desktop_options is not None: + client_request_id = compute_node_get_remote_desktop_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_desktop_options is not None: + return_client_request_id = compute_node_get_remote_desktop_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_desktop_options is not None: + ocp_date = compute_node_get_remote_desktop_options.ocp_date + + # Construct URL + url = self.get_remote_desktop.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", 
client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_desktop.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/rdp'} + + def upload_batch_service_logs( + self, pool_id, node_id, upload_batch_service_logs_configuration, compute_node_upload_batch_service_logs_options=None, custom_headers=None, raw=False, **operation_config): + """Upload Azure Batch service log files from the specified compute node to + Azure Blob Storage. + + This is for gathering Azure Batch service log files in an automated + fashion from nodes if you are experiencing an error and wish to + escalate to Azure support. The Azure Batch service log files should be + shared with Azure support to aid in debugging issues with the Batch + service. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node from which you want to + upload the Azure Batch service log files. 
+ :type node_id: str + :param upload_batch_service_logs_configuration: The Azure Batch + service log files upload configuration. + :type upload_batch_service_logs_configuration: + ~azure.batch.models.UploadBatchServiceLogsConfiguration + :param compute_node_upload_batch_service_logs_options: Additional + parameters for the operation + :type compute_node_upload_batch_service_logs_options: + ~azure.batch.models.ComputeNodeUploadBatchServiceLogsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UploadBatchServiceLogsResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.UploadBatchServiceLogsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_upload_batch_service_logs_options is not None: + timeout = compute_node_upload_batch_service_logs_options.timeout + client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + client_request_id = compute_node_upload_batch_service_logs_options.client_request_id + return_client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + return_client_request_id = compute_node_upload_batch_service_logs_options.return_client_request_id + ocp_date = None + if compute_node_upload_batch_service_logs_options is not None: + ocp_date = compute_node_upload_batch_service_logs_options.ocp_date + + # Construct URL + url = self.upload_batch_service_logs.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct 
parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(upload_batch_service_logs_configuration, 'UploadBatchServiceLogsConfiguration') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('UploadBatchServiceLogsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + 
client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + upload_batch_service_logs.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs'} + + def list( + self, pool_id, compute_node_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the compute nodes in the specified pool. + + :param pool_id: The ID of the pool from which you want to list nodes. + :type pool_id: str + :param compute_node_list_options: Additional parameters for the + operation + :type compute_node_list_options: + ~azure.batch.models.ComputeNodeListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ComputeNode + :rtype: + ~azure.batch.models.ComputeNodePaged[~azure.batch.models.ComputeNode] + :raises: + :class:`BatchErrorException` + """ + filter = None + if compute_node_list_options is not None: + filter = compute_node_list_options.filter + select = None + if compute_node_list_options is not None: + select = compute_node_list_options.select + max_results = None + if compute_node_list_options is not None: + max_results = compute_node_list_options.max_results + timeout = None + if compute_node_list_options is not None: + timeout = compute_node_list_options.timeout + client_request_id = None + if compute_node_list_options is not None: + client_request_id = compute_node_list_options.client_request_id + return_client_request_id = None + if compute_node_list_options is not None: + return_client_request_id = compute_node_list_options.return_client_request_id + ocp_date = None + if compute_node_list_options is not None: + ocp_date = compute_node_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + 
if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools/{poolId}/nodes'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/file_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/file_operations.py new file mode 100644 index 00000000..f3c81cfa --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/file_operations.py @@ -0,0 +1,898 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class FileOperations(object): + """FileOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". 
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def delete_from_task( + self, job_id, task_id, file_path, recursive=None, file_delete_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the specified task file from the compute node where the task + ran. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to delete. + :type task_id: str + :param file_path: The path to the task file or directory that you want + to delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_task_options: Additional parameters for the + operation + :type file_delete_from_task_options: + ~azure.batch.models.FileDeleteFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_task_options is not None: + timeout = file_delete_from_task_options.timeout + client_request_id = None + if file_delete_from_task_options is not None: + client_request_id = file_delete_from_task_options.client_request_id + return_client_request_id = None + if file_delete_from_task_options is not None: + return_client_request_id = file_delete_from_task_options.return_client_request_id + ocp_date = None + if file_delete_from_task_options is not None: + ocp_date = file_delete_from_task_options.ocp_date + + # Construct URL + url = self.delete_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = 
self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_from_task( + self, job_id, task_id, file_path, file_get_from_task_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified task file. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to retrieve. + :type task_id: str + :param file_path: The path to the task file that you want to get the + content of. + :type file_path: str + :param file_get_from_task_options: Additional parameters for the + operation + :type file_get_from_task_options: + ~azure.batch.models.FileGetFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. 
+ :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_task_options is not None: + timeout = file_get_from_task_options.timeout + client_request_id = None + if file_get_from_task_options is not None: + client_request_id = file_get_from_task_options.client_request_id + return_client_request_id = None + if file_get_from_task_options is not None: + return_client_request_id = file_get_from_task_options.return_client_request_id + ocp_date = None + if file_get_from_task_options is not None: + ocp_date = file_get_from_task_options.ocp_date + ocp_range = None + if file_get_from_task_options is not None: + ocp_range = file_get_from_task_options.ocp_range + if_modified_since = None + if file_get_from_task_options is not None: + if_modified_since = file_get_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_from_task_options is not None: + if_unmodified_since = file_get_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 
'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_properties_from_task( + self, job_id, task_id, file_path, file_get_properties_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified task file. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to get the + properties of. + :type task_id: str + :param file_path: The path to the task file that you want to get the + properties of. + :type file_path: str + :param file_get_properties_from_task_options: Additional parameters + for the operation + :type file_get_properties_from_task_options: + ~azure.batch.models.FileGetPropertiesFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_task_options is not None: + timeout = file_get_properties_from_task_options.timeout + client_request_id = None + if file_get_properties_from_task_options is not None: + client_request_id = file_get_properties_from_task_options.client_request_id + return_client_request_id = None + if file_get_properties_from_task_options is not None: + return_client_request_id = file_get_properties_from_task_options.return_client_request_id + ocp_date = None + if file_get_properties_from_task_options is not None: + ocp_date = file_get_properties_from_task_options.ocp_date + if_modified_since = None + if file_get_properties_from_task_options is not None: + if_modified_since = file_get_properties_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_task_options is not None: + if_unmodified_since = file_get_properties_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def delete_from_compute_node( + self, pool_id, node_id, file_path, recursive=None, file_delete_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the 
specified file from the compute node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node from which you want to + delete the file. + :type node_id: str + :param file_path: The path to the file or directory that you want to + delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_compute_node_options: Additional parameters + for the operation + :type file_delete_from_compute_node_options: + ~azure.batch.models.FileDeleteFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_compute_node_options is not None: + timeout = file_delete_from_compute_node_options.timeout + client_request_id = None + if file_delete_from_compute_node_options is not None: + client_request_id = file_delete_from_compute_node_options.client_request_id + return_client_request_id = None + if file_delete_from_compute_node_options is not None: + return_client_request_id = file_delete_from_compute_node_options.return_client_request_id + ocp_date = None + if file_delete_from_compute_node_options is not None: + ocp_date = file_delete_from_compute_node_options.ocp_date + + # Construct URL + url = self.delete_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_from_compute_node( + self, pool_id, node_id, file_path, file_get_from_compute_node_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified compute node file. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that contains the file. + :type node_id: str + :param file_path: The path to the compute node file that you want to + get the content of. + :type file_path: str + :param file_get_from_compute_node_options: Additional parameters for + the operation + :type file_get_from_compute_node_options: + ~azure.batch.models.FileGetFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. 
The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_compute_node_options is not None: + timeout = file_get_from_compute_node_options.timeout + client_request_id = None + if file_get_from_compute_node_options is not None: + client_request_id = file_get_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_from_compute_node_options is not None: + return_client_request_id = file_get_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_from_compute_node_options is not None: + ocp_date = file_get_from_compute_node_options.ocp_date + ocp_range = None + if file_get_from_compute_node_options is not None: + ocp_range = file_get_from_compute_node_options.ocp_range + if_modified_since = None + if file_get_from_compute_node_options is not None: + if_modified_since = file_get_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_from_compute_node_options is not None: + if_unmodified_since = file_get_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 
'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_properties_from_compute_node( + self, pool_id, node_id, file_path, file_get_properties_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified compute node file. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that contains the file. + :type node_id: str + :param file_path: The path to the compute node file that you want to + get the properties of. + :type file_path: str + :param file_get_properties_from_compute_node_options: Additional + parameters for the operation + :type file_get_properties_from_compute_node_options: + ~azure.batch.models.FileGetPropertiesFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_compute_node_options is not None: + timeout = file_get_properties_from_compute_node_options.timeout + client_request_id = None + if file_get_properties_from_compute_node_options is not None: + client_request_id = file_get_properties_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_properties_from_compute_node_options is not None: + return_client_request_id = file_get_properties_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_properties_from_compute_node_options is not None: + ocp_date = file_get_properties_from_compute_node_options.ocp_date + if_modified_since = None + if file_get_properties_from_compute_node_options is not None: + if_modified_since = file_get_properties_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_compute_node_options is not None: + if_unmodified_since = file_get_properties_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def list_from_task( + self, job_id, task_id, recursive=None, 
file_list_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the files in a task's directory on its compute node. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose files you want to list. + :type task_id: str + :param recursive: Whether to list children of the task directory. This + parameter can be used in combination with the filter parameter to list + specific type of files. + :type recursive: bool + :param file_list_from_task_options: Additional parameters for the + operation + :type file_list_from_task_options: + ~azure.batch.models.FileListFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_task_options is not None: + filter = file_list_from_task_options.filter + max_results = None + if file_list_from_task_options is not None: + max_results = file_list_from_task_options.max_results + timeout = None + if file_list_from_task_options is not None: + timeout = file_list_from_task_options.timeout + client_request_id = None + if file_list_from_task_options is not None: + client_request_id = file_list_from_task_options.client_request_id + return_client_request_id = None + if file_list_from_task_options is not None: + return_client_request_id = file_list_from_task_options.return_client_request_id + ocp_date = None + if file_list_from_task_options is not None: + ocp_date = file_list_from_task_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_task.metadata['url'] + path_format_arguments 
= { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files'} + + def list_from_compute_node( + self, pool_id, node_id, recursive=None, file_list_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the files in task directories on the specified compute + node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node whose files you want to + list. + :type node_id: str + :param recursive: Whether to list children of a directory. + :type recursive: bool + :param file_list_from_compute_node_options: Additional parameters for + the operation + :type file_list_from_compute_node_options: + ~azure.batch.models.FileListFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_compute_node_options is not None: + filter = file_list_from_compute_node_options.filter + max_results = None + if file_list_from_compute_node_options is not None: + max_results = file_list_from_compute_node_options.max_results + timeout = None + if file_list_from_compute_node_options is not None: + timeout = file_list_from_compute_node_options.timeout + client_request_id = None + if file_list_from_compute_node_options is not None: + client_request_id = file_list_from_compute_node_options.client_request_id + return_client_request_id = None + if file_list_from_compute_node_options is not None: + return_client_request_id = file_list_from_compute_node_options.return_client_request_id + ocp_date = None + if file_list_from_compute_node_options is not None: + ocp_date = file_list_from_compute_node_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', 
maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/job_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/job_operations.py new file mode 100644 index 00000000..c814cd27 --- /dev/null +++ 
def get_all_lifetime_statistics(
        self, job_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config):
    """Gets lifetime summary statistics for all of the jobs in the specified
    account.

    Statistics are aggregated across all jobs that have ever existed in
    the account, from account creation to the last update time of the
    statistics. The statistics may not be immediately available: the Batch
    service performs periodic roll-up of statistics, with a typical delay
    of about 30 minutes.

    :param job_get_all_lifetime_statistics_options: Additional parameters
     for the operation
    :type job_get_all_lifetime_statistics_options:
     ~azure.batch.models.JobGetAllLifetimeStatisticsOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: JobStatistics or ClientRawResponse if raw=true
    :rtype: ~azure.batch.models.JobStatistics or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack the optional per-operation parameters in a single pass.
    timeout = client_request_id = return_client_request_id = ocp_date = None
    options = job_get_all_lifetime_statistics_options
    if options is not None:
        timeout = options.timeout
        client_request_id = options.client_request_id
        return_client_request_id = options.return_client_request_id
        ocp_date = options.ocp_date

    # Build the request URL from the operation's URL template.
    url = self._client.format_url(
        self.get_all_lifetime_statistics.metadata['url'],
        batchUrl=self._serialize.url(
            "self.config.batch_url", self.config.batch_url, 'str', skip_quote=True))

    # Query string: the API version is mandatory, the timeout optional.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Request headers. Per-operation header options are applied after
    # custom_headers, so they take precedence over caller-supplied values.
    headers = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        headers['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        headers['client-request-id'] = self._serialize.header(
            "client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        headers['return-client-request-id'] = self._serialize.header(
            "return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        headers['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

    # Send the GET request; anything but 200 is surfaced as a Batch error.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.BatchErrorException(self._deserialize, response)

    deserialized = self._deserialize('JobStatistics', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
        })
        return client_raw_response
    return deserialized
get_all_lifetime_statistics.metadata = {'url': '/lifetimejobstats'}
When a Delete Job request is + received, the Batch service sets the job to the deleting state. All + update operations on a job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that + the job is being deleted. + + :param job_id: The ID of the job to delete. + :type job_id: str + :param job_delete_options: Additional parameters for the operation + :type job_delete_options: ~azure.batch.models.JobDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_delete_options is not None: + timeout = job_delete_options.timeout + client_request_id = None + if job_delete_options is not None: + client_request_id = job_delete_options.client_request_id + return_client_request_id = None + if job_delete_options is not None: + return_client_request_id = job_delete_options.return_client_request_id + ocp_date = None + if job_delete_options is not None: + ocp_date = job_delete_options.ocp_date + if_match = None + if job_delete_options is not None: + if_match = job_delete_options.if_match + if_none_match = None + if job_delete_options is not None: + if_none_match = job_delete_options.if_none_match + if_modified_since = None + if job_delete_options is not None: + if_modified_since = job_delete_options.if_modified_since + if_unmodified_since = None + if job_delete_options is not None: + if_unmodified_since = job_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", 
job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise 
def get(
        self, job_id, job_get_options=None, custom_headers=None, raw=False, **operation_config):
    """Gets information about the specified job.

    :param job_id: The ID of the job.
    :type job_id: str
    :param job_get_options: Additional parameters for the operation
    :type job_get_options: ~azure.batch.models.JobGetOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: CloudJob or ClientRawResponse if raw=true
    :rtype: ~azure.batch.models.CloudJob or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack the optional per-operation parameters in a single pass.
    select = expand = timeout = None
    client_request_id = return_client_request_id = ocp_date = None
    if_match = if_none_match = if_modified_since = if_unmodified_since = None
    if job_get_options is not None:
        select = job_get_options.select
        expand = job_get_options.expand
        timeout = job_get_options.timeout
        client_request_id = job_get_options.client_request_id
        return_client_request_id = job_get_options.return_client_request_id
        ocp_date = job_get_options.ocp_date
        if_match = job_get_options.if_match
        if_none_match = job_get_options.if_none_match
        if_modified_since = job_get_options.if_modified_since
        if_unmodified_since = job_get_options.if_unmodified_since

    # Build the request URL from the operation's URL template.
    url = self._client.format_url(
        self.get.metadata['url'],
        batchUrl=self._serialize.url(
            "self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Query string: mandatory API version plus optional OData and timeout values.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    if select is not None:
        query['$select'] = self._serialize.query("select", select, 'str')
    if expand is not None:
        query['$expand'] = self._serialize.query("expand", expand, 'str')
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Request headers. Per-operation header options are applied after
    # custom_headers, so they take precedence over caller-supplied values.
    headers = {'Accept': 'application/json'}
    if self.config.generate_client_request_id:
        headers['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        headers['client-request-id'] = self._serialize.header(
            "client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        headers['return-client-request-id'] = self._serialize.header(
            "return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        headers['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        headers['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        headers['If-Modified-Since'] = self._serialize.header(
            "if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        headers['If-Unmodified-Since'] = self._serialize.header(
            "if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Send the GET request; anything but 200 is surfaced as a Batch error.
    request = self._client.get(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.BatchErrorException(self._deserialize, response)

    deserialized = self._deserialize('CloudJob', response)
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
        })
        return client_raw_response
    return deserialized
get.metadata = {'url': '/jobs/{jobId}'}
+ :type job_patch_parameter: ~azure.batch.models.JobPatchParameter + :param job_patch_options: Additional parameters for the operation + :type job_patch_options: ~azure.batch.models.JobPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_patch_options is not None: + timeout = job_patch_options.timeout + client_request_id = None + if job_patch_options is not None: + client_request_id = job_patch_options.client_request_id + return_client_request_id = None + if job_patch_options is not None: + return_client_request_id = job_patch_options.return_client_request_id + ocp_date = None + if job_patch_options is not None: + ocp_date = job_patch_options.ocp_date + if_match = None + if job_patch_options is not None: + if_match = job_patch_options.if_match + if_none_match = None + if job_patch_options is not None: + if_none_match = job_patch_options.if_none_match + if_modified_since = None + if job_patch_options is not None: + if_modified_since = job_patch_options.if_modified_since + if_unmodified_since = None + if job_patch_options is not None: + if_unmodified_since = job_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_patch_parameter, 'JobPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = 
def update(
        self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config):
    """Updates the properties of the specified job.

    This fully replaces all the updatable properties of the job. For
    example, if the job has constraints associated with it and if
    constraints is not specified with this request, then the Batch service
    will remove the existing constraints.

    :param job_id: The ID of the job whose properties you want to update.
    :type job_id: str
    :param job_update_parameter: The parameters for the request.
    :type job_update_parameter: ~azure.batch.models.JobUpdateParameter
    :param job_update_options: Additional parameters for the operation
    :type job_update_options: ~azure.batch.models.JobUpdateOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack the optional per-operation parameters in a single pass.
    timeout = client_request_id = return_client_request_id = ocp_date = None
    if_match = if_none_match = if_modified_since = if_unmodified_since = None
    if job_update_options is not None:
        timeout = job_update_options.timeout
        client_request_id = job_update_options.client_request_id
        return_client_request_id = job_update_options.return_client_request_id
        ocp_date = job_update_options.ocp_date
        if_match = job_update_options.if_match
        if_none_match = job_update_options.if_none_match
        if_modified_since = job_update_options.if_modified_since
        if_unmodified_since = job_update_options.if_unmodified_since

    # Build the request URL from the operation's URL template.
    url = self._client.format_url(
        self.update.metadata['url'],
        batchUrl=self._serialize.url(
            "self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Query string: the API version is mandatory, the timeout optional.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Request headers. Per-operation header options are applied after
    # custom_headers, so they take precedence over caller-supplied values.
    headers = {'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        headers['client-request-id'] = self._serialize.header(
            "client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        headers['return-client-request-id'] = self._serialize.header(
            "return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        headers['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        headers['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        headers['If-Modified-Since'] = self._serialize.header(
            "if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        headers['If-Unmodified-Since'] = self._serialize.header(
            "if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Serialize the replacement job definition and send the PUT request;
    # anything but 200 is surfaced as a Batch error.
    body_content = self._serialize.body(job_update_parameter, 'JobUpdateParameter')
    request = self._client.put(url, query, headers, body_content)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 200:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
update.metadata = {'url': '/jobs/{jobId}'}
specified job, preventing new tasks from running. + + The Batch Service immediately moves the job to the disabling state. + Batch then uses the disableTasks parameter to determine what to do with + the currently running tasks of the job. The job remains in the + disabling state until the disable operation is completed and all tasks + have been dealt with according to the disableTasks option; the job then + moves to the disabled state. No new tasks are started under the job + until it moves back to active state. If you try to disable a job that + is in any state other than active, disabling, or disabled, the request + fails with status code 409. + + :param job_id: The ID of the job to disable. + :type job_id: str + :param disable_tasks: What to do with active tasks associated with the + job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + :param job_disable_options: Additional parameters for the operation + :type job_disable_options: ~azure.batch.models.JobDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_disable_options is not None: + timeout = job_disable_options.timeout + client_request_id = None + if job_disable_options is not None: + client_request_id = job_disable_options.client_request_id + return_client_request_id = None + if job_disable_options is not None: + return_client_request_id = job_disable_options.return_client_request_id + ocp_date = None + if job_disable_options is not None: + ocp_date = job_disable_options.ocp_date + if_match = None + if job_disable_options is not None: + if_match = job_disable_options.if_match + if_none_match = None + if job_disable_options is not None: + if_none_match = job_disable_options.if_none_match + if_modified_since = None + if job_disable_options is not None: + if_modified_since = job_disable_options.if_modified_since + if_unmodified_since = None + if job_disable_options is not None: + if_unmodified_since = job_disable_options.if_unmodified_since + job_disable_parameter = models.JobDisableParameter(disable_tasks=disable_tasks) + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
def enable(
        self, job_id, job_enable_options=None, custom_headers=None, raw=False, **operation_config):
    """Enables the specified job, allowing new tasks to run.

    When you call this API, the Batch service sets a disabled job to the
    enabling state. After the operation is completed, the job moves to the
    active state, and scheduling of new tasks under the job resumes. The
    Batch service does not allow a task to remain in the active state for
    more than 180 days; if you enable a job containing active tasks which
    were added more than 180 days ago, those tasks will not run.

    :param job_id: The ID of the job to enable.
    :type job_id: str
    :param job_enable_options: Additional parameters for the operation
    :type job_enable_options: ~azure.batch.models.JobEnableOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Unpack the optional per-operation parameters in a single pass.
    timeout = client_request_id = return_client_request_id = ocp_date = None
    if_match = if_none_match = if_modified_since = if_unmodified_since = None
    if job_enable_options is not None:
        timeout = job_enable_options.timeout
        client_request_id = job_enable_options.client_request_id
        return_client_request_id = job_enable_options.return_client_request_id
        ocp_date = job_enable_options.ocp_date
        if_match = job_enable_options.if_match
        if_none_match = job_enable_options.if_none_match
        if_modified_since = job_enable_options.if_modified_since
        if_unmodified_since = job_enable_options.if_unmodified_since

    # Build the request URL from the operation's URL template.
    url = self._client.format_url(
        self.enable.metadata['url'],
        batchUrl=self._serialize.url(
            "self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Query string: the API version is mandatory, the timeout optional.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}
    if timeout is not None:
        query['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Request headers — this POST carries no body, so no Content-Type/Accept.
    # Per-operation header options are applied after custom_headers, so they
    # take precedence over caller-supplied values.
    headers = {}
    if self.config.generate_client_request_id:
        headers['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        headers['client-request-id'] = self._serialize.header(
            "client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        headers['return-client-request-id'] = self._serialize.header(
            "return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        headers['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        headers['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        headers['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        headers['If-Modified-Since'] = self._serialize.header(
            "if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        headers['If-Unmodified-Since'] = self._serialize.header(
            "if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Send the POST request; the service acknowledges with 202 Accepted.
    request = self._client.post(url, query, headers)
    response = self._client.send(request, stream=False, **operation_config)
    if response.status_code != 202:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
enable.metadata = {'url': '/jobs/{jobId}/enable'}
The Batch service then terminates any + running tasks associated with the job and runs any required job release + tasks. Then the job moves into the completed state. If there are any + tasks in the job in the active state, they will remain in the active + state. Once a job is terminated, new tasks cannot be added and any + remaining active tasks will not be scheduled. + + :param job_id: The ID of the job to terminate. + :type job_id: str + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + :param job_terminate_options: Additional parameters for the operation + :type job_terminate_options: ~azure.batch.models.JobTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_terminate_options is not None: + timeout = job_terminate_options.timeout + client_request_id = None + if job_terminate_options is not None: + client_request_id = job_terminate_options.client_request_id + return_client_request_id = None + if job_terminate_options is not None: + return_client_request_id = job_terminate_options.return_client_request_id + ocp_date = None + if job_terminate_options is not None: + ocp_date = job_terminate_options.ocp_date + if_match = None + if job_terminate_options is not None: + if_match = job_terminate_options.if_match + if_none_match = None + if job_terminate_options is not None: + if_none_match = job_terminate_options.if_none_match + if_modified_since = None + if job_terminate_options is not None: + if_modified_since = job_terminate_options.if_modified_since + if_unmodified_since = None + if job_terminate_options is 
not None: + if_unmodified_since = job_terminate_options.if_unmodified_since + job_terminate_parameter = None + if terminate_reason is not None: + job_terminate_parameter = models.JobTerminateParameter(terminate_reason=terminate_reason) + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + if job_terminate_parameter is not None: + body_content = self._serialize.body(job_terminate_parameter, 'JobTerminateParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/terminate'} + + def add( + self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a job to the specified account. + + The Batch service supports two ways to control the work done as part of + a job. In the first approach, the user specifies a Job Manager task. + The Batch service launches this task when it is ready to start the job. + The Job Manager task controls all other tasks that run under this job, + by using the Task APIs. In the second approach, the user directly + controls the execution of tasks under an active job, by using the Task + APIs. Also note: when naming jobs, avoid including sensitive + information such as user names or secret project names. This + information may appear in telemetry logs accessible to Microsoft + Support engineers. + + :param job: The job to be added. 
+ :type job: ~azure.batch.models.JobAddParameter + :param job_add_options: Additional parameters for the operation + :type job_add_options: ~azure.batch.models.JobAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_add_options is not None: + timeout = job_add_options.timeout + client_request_id = None + if job_add_options is not None: + client_request_id = job_add_options.client_request_id + return_client_request_id = None + if job_add_options is not None: + return_client_request_id = job_add_options.return_client_request_id + ocp_date = None + if job_add_options is not None: + ocp_date = job_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job, 'JobAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs'} + + def list( + self, job_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the jobs in the specified account. + + :param job_list_options: Additional parameters for the operation + :type job_list_options: ~azure.batch.models.JobListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_options is not None: + filter = job_list_options.filter + select = None + if job_list_options is not None: + select = job_list_options.select + expand = None + if job_list_options is not None: + expand = job_list_options.expand + max_results = None + if job_list_options is not None: + max_results = job_list_options.max_results + timeout = None + if job_list_options is not None: + timeout = job_list_options.timeout + client_request_id = None + if job_list_options is not None: + client_request_id = job_list_options.client_request_id + return_client_request_id = None + if job_list_options is not None: + return_client_request_id = job_list_options.return_client_request_id + ocp_date = None + if job_list_options is not None: + ocp_date = job_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs'} + + def list_from_job_schedule( + self, job_schedule_id, job_list_from_job_schedule_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the jobs that have been created under the specified job schedule. + + :param job_schedule_id: The ID of the job schedule from which you want + to get a list of jobs. 
+ :type job_schedule_id: str + :param job_list_from_job_schedule_options: Additional parameters for + the operation + :type job_list_from_job_schedule_options: + ~azure.batch.models.JobListFromJobScheduleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_from_job_schedule_options is not None: + filter = job_list_from_job_schedule_options.filter + select = None + if job_list_from_job_schedule_options is not None: + select = job_list_from_job_schedule_options.select + expand = None + if job_list_from_job_schedule_options is not None: + expand = job_list_from_job_schedule_options.expand + max_results = None + if job_list_from_job_schedule_options is not None: + max_results = job_list_from_job_schedule_options.max_results + timeout = None + if job_list_from_job_schedule_options is not None: + timeout = job_list_from_job_schedule_options.timeout + client_request_id = None + if job_list_from_job_schedule_options is not None: + client_request_id = job_list_from_job_schedule_options.client_request_id + return_client_request_id = None + if job_list_from_job_schedule_options is not None: + return_client_request_id = job_list_from_job_schedule_options.return_client_request_id + ocp_date = None + if job_list_from_job_schedule_options is not None: + ocp_date = job_list_from_job_schedule_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_job_schedule.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_job_schedule.metadata = {'url': '/jobschedules/{jobScheduleId}/jobs'} + + def list_preparation_and_release_task_status( + self, job_id, job_list_preparation_and_release_task_status_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the execution status of the Job Preparation and Job Release task + for the specified job across the compute nodes where the job has run. + + This API returns the Job Preparation and Job Release task status on all + compute nodes that have run the Job Preparation or Job Release task. + This includes nodes which have since been removed from the pool. If + this API is invoked on a job which has no Job Preparation or Job + Release task, the Batch service returns HTTP status code 409 (Conflict) + with an error code of JobPreparationTaskNotSpecified. + + :param job_id: The ID of the job. + :type job_id: str + :param job_list_preparation_and_release_task_status_options: + Additional parameters for the operation + :type job_list_preparation_and_release_task_status_options: + ~azure.batch.models.JobListPreparationAndReleaseTaskStatusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of + JobPreparationAndReleaseTaskExecutionInformation + :rtype: + ~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformationPaged[~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_preparation_and_release_task_status_options is not None: + filter = job_list_preparation_and_release_task_status_options.filter + select = None + if job_list_preparation_and_release_task_status_options is not None: + select = job_list_preparation_and_release_task_status_options.select + max_results = None + if job_list_preparation_and_release_task_status_options is not None: + max_results = job_list_preparation_and_release_task_status_options.max_results + timeout = None + if job_list_preparation_and_release_task_status_options is not None: + timeout = job_list_preparation_and_release_task_status_options.timeout + client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + client_request_id = job_list_preparation_and_release_task_status_options.client_request_id + return_client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + return_client_request_id = job_list_preparation_and_release_task_status_options.return_client_request_id + ocp_date = None + if job_list_preparation_and_release_task_status_options is not None: + ocp_date = job_list_preparation_and_release_task_status_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_preparation_and_release_task_status.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + 
client_raw_response = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_preparation_and_release_task_status.metadata = {'url': '/jobs/{jobId}/jobpreparationandreleasetaskstatus'} + + def get_task_counts( + self, job_id, job_get_task_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the task counts for the specified job. + + Task counts provide a count of the tasks by active, running or + completed task state, and a count of tasks which succeeded or failed. + Tasks in the preparing state are counted as running. + + :param job_id: The ID of the job. + :type job_id: str + :param job_get_task_counts_options: Additional parameters for the + operation + :type job_get_task_counts_options: + ~azure.batch.models.JobGetTaskCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskCounts or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskCounts or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_task_counts_options is not None: + timeout = job_get_task_counts_options.timeout + client_request_id = None + if job_get_task_counts_options is not None: + client_request_id = job_get_task_counts_options.client_request_id + return_client_request_id = None + if job_get_task_counts_options is not None: + return_client_request_id = job_get_task_counts_options.return_client_request_id + ocp_date = None + if job_get_task_counts_options is not None: + ocp_date = job_get_task_counts_options.ocp_date + + # Construct URL + url = self.get_task_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = 
self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskCounts', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_task_counts.metadata = {'url': '/jobs/{jobId}/taskcounts'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/job_schedule_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/job_schedule_operations.py new file mode 100644 index 00000000..96b91d90 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/job_schedule_operations.py @@ -0,0 +1,1093 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobScheduleOperations(object): + """JobScheduleOperations operations. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def exists( + self, job_schedule_id, job_schedule_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Checks the specified job schedule exists. + + :param job_schedule_id: The ID of the job schedule which you want to + check. + :type job_schedule_id: str + :param job_schedule_exists_options: Additional parameters for the + operation + :type job_schedule_exists_options: + ~azure.batch.models.JobScheduleExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_exists_options is not None: + timeout = job_schedule_exists_options.timeout + client_request_id = None + if job_schedule_exists_options is not None: + client_request_id = job_schedule_exists_options.client_request_id + return_client_request_id = None + if job_schedule_exists_options is not None: + return_client_request_id = job_schedule_exists_options.return_client_request_id + ocp_date = None + if job_schedule_exists_options is not None: + ocp_date = job_schedule_exists_options.ocp_date + if_match = None + if job_schedule_exists_options is not None: + if_match = job_schedule_exists_options.if_match + if_none_match = None + if job_schedule_exists_options is not None: + if_none_match = job_schedule_exists_options.if_none_match + if_modified_since = None + if job_schedule_exists_options is not None: + if_modified_since = job_schedule_exists_options.if_modified_since + if_unmodified_since = None + if job_schedule_exists_options is not None: + if_unmodified_since = job_schedule_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def delete( + self, job_schedule_id, job_schedule_delete_options=None, custom_headers=None, 
raw=False, **operation_config): + """Deletes a job schedule from the specified account. + + When you delete a job schedule, this also deletes all jobs and tasks + under that schedule. When tasks are deleted, all the files in their + working directories on the compute nodes are also deleted (the + retention period is ignored). The job schedule statistics are no longer + accessible once the job schedule is deleted, though they are still + counted towards account lifetime statistics. + + :param job_schedule_id: The ID of the job schedule to delete. + :type job_schedule_id: str + :param job_schedule_delete_options: Additional parameters for the + operation + :type job_schedule_delete_options: + ~azure.batch.models.JobScheduleDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_delete_options is not None: + timeout = job_schedule_delete_options.timeout + client_request_id = None + if job_schedule_delete_options is not None: + client_request_id = job_schedule_delete_options.client_request_id + return_client_request_id = None + if job_schedule_delete_options is not None: + return_client_request_id = job_schedule_delete_options.return_client_request_id + ocp_date = None + if job_schedule_delete_options is not None: + ocp_date = job_schedule_delete_options.ocp_date + if_match = None + if job_schedule_delete_options is not None: + if_match = job_schedule_delete_options.if_match + if_none_match = None + if job_schedule_delete_options is not None: + if_none_match = job_schedule_delete_options.if_none_match + if_modified_since = None + if job_schedule_delete_options is not None: + if_modified_since = 
job_schedule_delete_options.if_modified_since + if_unmodified_since = None + if job_schedule_delete_options is not None: + if_unmodified_since = job_schedule_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def get( + self, job_schedule_id, job_schedule_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified job schedule. + + :param job_schedule_id: The ID of the job schedule to get. + :type job_schedule_id: str + :param job_schedule_get_options: Additional parameters for the + operation + :type job_schedule_get_options: + ~azure.batch.models.JobScheduleGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudJobSchedule or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJobSchedule or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_schedule_get_options is not None: + select = job_schedule_get_options.select + expand = None + if job_schedule_get_options is not None: + expand = job_schedule_get_options.expand + timeout = None + if job_schedule_get_options is not None: + timeout = job_schedule_get_options.timeout + client_request_id = None + if job_schedule_get_options is not None: + client_request_id = job_schedule_get_options.client_request_id + return_client_request_id = None + if job_schedule_get_options is not None: + return_client_request_id = job_schedule_get_options.return_client_request_id + ocp_date = None + if job_schedule_get_options is not None: + ocp_date = job_schedule_get_options.ocp_date + if_match = None + if job_schedule_get_options is not None: + if_match = job_schedule_get_options.if_match + if_none_match = None + if job_schedule_get_options is not None: + if_none_match = job_schedule_get_options.if_none_match + if_modified_since = None + if job_schedule_get_options is not None: + if_modified_since = job_schedule_get_options.if_modified_since + if_unmodified_since = None + if job_schedule_get_options is not None: + if_unmodified_since = job_schedule_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 
'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} 
+ + if response.status_code == 200: + deserialized = self._deserialize('CloudJobSchedule', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def patch( + self, job_schedule_id, job_schedule_patch_parameter, job_schedule_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job schedule. + + This replaces only the job schedule properties specified in the + request. For example, if the schedule property is not specified with + this request, then the Batch service will keep the existing schedule. + Changes to a job schedule only impact jobs created by the schedule + after the update has taken place; currently running jobs are + unaffected. + + :param job_schedule_id: The ID of the job schedule to update. + :type job_schedule_id: str + :param job_schedule_patch_parameter: The parameters for the request. + :type job_schedule_patch_parameter: + ~azure.batch.models.JobSchedulePatchParameter + :param job_schedule_patch_options: Additional parameters for the + operation + :type job_schedule_patch_options: + ~azure.batch.models.JobSchedulePatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_patch_options is not None: + timeout = job_schedule_patch_options.timeout + client_request_id = None + if job_schedule_patch_options is not None: + client_request_id = job_schedule_patch_options.client_request_id + return_client_request_id = None + if job_schedule_patch_options is not None: + return_client_request_id = job_schedule_patch_options.return_client_request_id + ocp_date = None + if job_schedule_patch_options is not None: + ocp_date = job_schedule_patch_options.ocp_date + if_match = None + if job_schedule_patch_options is not None: + if_match = job_schedule_patch_options.if_match + if_none_match = None + if job_schedule_patch_options is not None: + if_none_match = job_schedule_patch_options.if_none_match + if_modified_since = None + if job_schedule_patch_options is not None: + if_modified_since = job_schedule_patch_options.if_modified_since + if_unmodified_since = None + if job_schedule_patch_options is not None: + if_unmodified_since = job_schedule_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_patch_parameter, 'JobSchedulePatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
patch.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def update( + self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job schedule. + + This fully replaces all the updatable properties of the job schedule. + For example, if the schedule property is not specified with this + request, then the Batch service will remove the existing schedule. + Changes to a job schedule only impact jobs created by the schedule + after the update has taken place; currently running jobs are + unaffected. + + :param job_schedule_id: The ID of the job schedule to update. + :type job_schedule_id: str + :param job_schedule_update_parameter: The parameters for the request. + :type job_schedule_update_parameter: + ~azure.batch.models.JobScheduleUpdateParameter + :param job_schedule_update_options: Additional parameters for the + operation + :type job_schedule_update_options: + ~azure.batch.models.JobScheduleUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_update_options is not None: + timeout = job_schedule_update_options.timeout + client_request_id = None + if job_schedule_update_options is not None: + client_request_id = job_schedule_update_options.client_request_id + return_client_request_id = None + if job_schedule_update_options is not None: + return_client_request_id = job_schedule_update_options.return_client_request_id + ocp_date = None + if job_schedule_update_options is not None: + ocp_date = job_schedule_update_options.ocp_date + if_match = None + if job_schedule_update_options is not None: + if_match = job_schedule_update_options.if_match + if_none_match = None + if job_schedule_update_options is not None: + if_none_match = job_schedule_update_options.if_none_match + if_modified_since = None + if job_schedule_update_options is not None: + if_modified_since = job_schedule_update_options.if_modified_since + if_unmodified_since = None + if job_schedule_update_options is not None: + if_unmodified_since = job_schedule_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_update_parameter, 'JobScheduleUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
update.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def disable( + self, job_schedule_id, job_schedule_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables a job schedule. + + No new jobs will be created until the job schedule is enabled again. + + :param job_schedule_id: The ID of the job schedule to disable. + :type job_schedule_id: str + :param job_schedule_disable_options: Additional parameters for the + operation + :type job_schedule_disable_options: + ~azure.batch.models.JobScheduleDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_disable_options is not None: + timeout = job_schedule_disable_options.timeout + client_request_id = None + if job_schedule_disable_options is not None: + client_request_id = job_schedule_disable_options.client_request_id + return_client_request_id = None + if job_schedule_disable_options is not None: + return_client_request_id = job_schedule_disable_options.return_client_request_id + ocp_date = None + if job_schedule_disable_options is not None: + ocp_date = job_schedule_disable_options.ocp_date + if_match = None + if job_schedule_disable_options is not None: + if_match = job_schedule_disable_options.if_match + if_none_match = None + if job_schedule_disable_options is not None: + if_none_match = job_schedule_disable_options.if_none_match + if_modified_since = None + if job_schedule_disable_options is not None: + if_modified_since = job_schedule_disable_options.if_modified_since + if_unmodified_since = None + if job_schedule_disable_options is not None: + if_unmodified_since = 
job_schedule_disable_options.if_unmodified_since + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobschedules/{jobScheduleId}/disable'} + + def enable( + self, job_schedule_id, job_schedule_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables a job schedule. + + :param job_schedule_id: The ID of the job schedule to enable. + :type job_schedule_id: str + :param job_schedule_enable_options: Additional parameters for the + operation + :type job_schedule_enable_options: + ~azure.batch.models.JobScheduleEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_enable_options is not None: + timeout = job_schedule_enable_options.timeout + client_request_id = None + if job_schedule_enable_options is not None: + client_request_id = job_schedule_enable_options.client_request_id + return_client_request_id = None + if job_schedule_enable_options is not None: + return_client_request_id = job_schedule_enable_options.return_client_request_id + ocp_date = None + if job_schedule_enable_options is not None: + ocp_date = job_schedule_enable_options.ocp_date + if_match = None + if job_schedule_enable_options is not None: + if_match = job_schedule_enable_options.if_match + if_none_match = None + if job_schedule_enable_options is not None: + if_none_match = job_schedule_enable_options.if_none_match + if_modified_since = None + if job_schedule_enable_options is not None: + if_modified_since = job_schedule_enable_options.if_modified_since + if_unmodified_since = None + if job_schedule_enable_options is not None: + if_unmodified_since = job_schedule_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobschedules/{jobScheduleId}/enable'} + + def terminate( + self, job_schedule_id, job_schedule_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates a 
job schedule. + + :param job_schedule_id: The ID of the job schedule to terminates. + :type job_schedule_id: str + :param job_schedule_terminate_options: Additional parameters for the + operation + :type job_schedule_terminate_options: + ~azure.batch.models.JobScheduleTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_terminate_options is not None: + timeout = job_schedule_terminate_options.timeout + client_request_id = None + if job_schedule_terminate_options is not None: + client_request_id = job_schedule_terminate_options.client_request_id + return_client_request_id = None + if job_schedule_terminate_options is not None: + return_client_request_id = job_schedule_terminate_options.return_client_request_id + ocp_date = None + if job_schedule_terminate_options is not None: + ocp_date = job_schedule_terminate_options.ocp_date + if_match = None + if job_schedule_terminate_options is not None: + if_match = job_schedule_terminate_options.if_match + if_none_match = None + if job_schedule_terminate_options is not None: + if_none_match = job_schedule_terminate_options.if_none_match + if_modified_since = None + if job_schedule_terminate_options is not None: + if_modified_since = job_schedule_terminate_options.if_modified_since + if_unmodified_since = None + if job_schedule_terminate_options is not None: + if_unmodified_since = job_schedule_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobschedules/{jobScheduleId}/terminate'} + + def add( + self, cloud_job_schedule, job_schedule_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a job schedule to the specified account. + + :param cloud_job_schedule: The job schedule to be added. + :type cloud_job_schedule: ~azure.batch.models.JobScheduleAddParameter + :param job_schedule_add_options: Additional parameters for the + operation + :type job_schedule_add_options: + ~azure.batch.models.JobScheduleAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_add_options is not None: + timeout = job_schedule_add_options.timeout + client_request_id = None + if job_schedule_add_options is not None: + client_request_id = job_schedule_add_options.client_request_id + return_client_request_id = None + if job_schedule_add_options is not None: + return_client_request_id = job_schedule_add_options.return_client_request_id + ocp_date = None + if job_schedule_add_options is not None: + ocp_date = job_schedule_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date 
is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(cloud_job_schedule, 'JobScheduleAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobschedules'} + + def list( + self, job_schedule_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the job schedules in the specified account. + + :param job_schedule_list_options: Additional parameters for the + operation + :type job_schedule_list_options: + ~azure.batch.models.JobScheduleListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJobSchedule + :rtype: + ~azure.batch.models.CloudJobSchedulePaged[~azure.batch.models.CloudJobSchedule] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_schedule_list_options is not None: + filter = job_schedule_list_options.filter + select = None + if job_schedule_list_options is not None: + select = job_schedule_list_options.select + expand = None + if job_schedule_list_options is not None: + expand = job_schedule_list_options.expand + max_results = None + if job_schedule_list_options is not None: + max_results = job_schedule_list_options.max_results + timeout = None + if job_schedule_list_options is not None: + timeout = job_schedule_list_options.timeout + client_request_id = None + if job_schedule_list_options is not None: + client_request_id = job_schedule_list_options.client_request_id + return_client_request_id = None + if job_schedule_list_options is not None: + return_client_request_id = job_schedule_list_options.return_client_request_id + ocp_date = None + if job_schedule_list_options is not None: + ocp_date = job_schedule_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = 
self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobschedules'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/pool_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/pool_operations.py new file mode 100644 index 00000000..699cb4ff 
--- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/pool_operations.py @@ -0,0 +1,1635 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class PoolOperations(object): + """PoolOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def list_usage_metrics( + self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the usage metrics, aggregated by pool across individual time + intervals, for the specified account. + + If you do not specify a $filter clause including a poolId, the response + includes all pools that existed in the account in the time range of the + returned aggregation intervals. If you do not specify a $filter clause + including a startTime or endTime these filters default to the start and + end times of the last aggregation interval currently available; that + is, only the last aggregation interval is returned. 
+ + :param pool_list_usage_metrics_options: Additional parameters for the + operation + :type pool_list_usage_metrics_options: + ~azure.batch.models.PoolListUsageMetricsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of PoolUsageMetrics + :rtype: + ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics] + :raises: + :class:`BatchErrorException` + """ + start_time = None + if pool_list_usage_metrics_options is not None: + start_time = pool_list_usage_metrics_options.start_time + end_time = None + if pool_list_usage_metrics_options is not None: + end_time = pool_list_usage_metrics_options.end_time + filter = None + if pool_list_usage_metrics_options is not None: + filter = pool_list_usage_metrics_options.filter + max_results = None + if pool_list_usage_metrics_options is not None: + max_results = pool_list_usage_metrics_options.max_results + timeout = None + if pool_list_usage_metrics_options is not None: + timeout = pool_list_usage_metrics_options.timeout + client_request_id = None + if pool_list_usage_metrics_options is not None: + client_request_id = pool_list_usage_metrics_options.client_request_id + return_client_request_id = None + if pool_list_usage_metrics_options is not None: + return_client_request_id = pool_list_usage_metrics_options.return_client_request_id + ocp_date = None + if pool_list_usage_metrics_options is not None: + ocp_date = pool_list_usage_metrics_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_usage_metrics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) 
+ + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if start_time is not None: + query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601') + if end_time is not None: + query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + 
+ # Deserialize response + deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_usage_metrics.metadata = {'url': '/poolusagemetrics'} + + def get_all_lifetime_statistics( + self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the pools in the specified + account. + + Statistics are aggregated across all pools that have ever existed in + the account, from account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + + :param pool_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type pool_get_all_lifetime_statistics_options: + ~azure.batch.models.PoolGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PoolStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.PoolStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_get_all_lifetime_statistics_options is not None: + timeout = pool_get_all_lifetime_statistics_options.timeout + client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + client_request_id = pool_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if pool_get_all_lifetime_statistics_options is not None: + ocp_date = pool_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not 
None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('PoolStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'} + + def add( + self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a pool to the specified account. + + When naming pools, avoid including sensitive information such as user + names or secret project names. This information may appear in telemetry + logs accessible to Microsoft Support engineers. + + :param pool: The pool to be added. + :type pool: ~azure.batch.models.PoolAddParameter + :param pool_add_options: Additional parameters for the operation + :type pool_add_options: ~azure.batch.models.PoolAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_add_options is not None: + timeout = pool_add_options.timeout + client_request_id = None + if pool_add_options is not None: + client_request_id = pool_add_options.client_request_id + return_client_request_id = None + if pool_add_options is not None: + return_client_request_id = pool_add_options.return_client_request_id + ocp_date = None + if pool_add_options is not None: + ocp_date = pool_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool, 'PoolAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/pools'} + + def list( + self, pool_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the pools in the specified account. + + :param pool_list_options: Additional parameters for the operation + :type pool_list_options: ~azure.batch.models.PoolListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudPool + :rtype: + ~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool] + :raises: + :class:`BatchErrorException` + """ + filter = None + if pool_list_options is not None: + filter = pool_list_options.filter + select = None + if pool_list_options is not None: + select = pool_list_options.select + expand = None + if pool_list_options is not None: + expand = pool_list_options.expand + max_results = None + if pool_list_options is not None: + max_results = pool_list_options.max_results + timeout = None + if pool_list_options is not None: + timeout = pool_list_options.timeout + client_request_id = None + if pool_list_options is not None: + client_request_id = pool_list_options.client_request_id + return_client_request_id = None + if pool_list_options is not None: + return_client_request_id = pool_list_options.return_client_request_id + ocp_date = None + if pool_list_options is not None: + ocp_date = pool_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools'} + + def delete( + self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a pool from the specified account. 
+ + When you request that a pool be deleted, the following actions occur: + the pool state is set to deleting; any ongoing resize operation on the + pool are stopped; the Batch service starts resizing the pool to zero + nodes; any tasks running on existing nodes are terminated and requeued + (as if a resize pool operation had been requested with the default + requeue option); finally, the pool is removed from the system. Because + running tasks are requeued, the user can rerun these tasks by updating + their job to target a different pool. The tasks can then run on the new + pool. If you want to override the requeue behavior, then you should + call resize pool explicitly to shrink the pool to zero size before + deleting the pool. If you call an Update, Patch or Delete API on a pool + in the deleting state, it will fail with HTTP status code 409 with + error code PoolBeingDeleted. + + :param pool_id: The ID of the pool to delete. + :type pool_id: str + :param pool_delete_options: Additional parameters for the operation + :type pool_delete_options: ~azure.batch.models.PoolDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_delete_options is not None: + timeout = pool_delete_options.timeout + client_request_id = None + if pool_delete_options is not None: + client_request_id = pool_delete_options.client_request_id + return_client_request_id = None + if pool_delete_options is not None: + return_client_request_id = pool_delete_options.return_client_request_id + ocp_date = None + if pool_delete_options is not None: + ocp_date = pool_delete_options.ocp_date + if_match = None + if pool_delete_options is not None: + if_match = pool_delete_options.if_match + if_none_match = None + if pool_delete_options is not None: + if_none_match = pool_delete_options.if_none_match + if_modified_since = None + if pool_delete_options is not None: + if_modified_since = pool_delete_options.if_modified_since + if_unmodified_since = None + if pool_delete_options is not None: + if_unmodified_since = pool_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/pools/{poolId}'} + + def exists( + self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Gets basic properties of a pool. + + :param pool_id: The ID of the pool to get. 
+ :type pool_id: str + :param pool_exists_options: Additional parameters for the operation + :type pool_exists_options: ~azure.batch.models.PoolExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_exists_options is not None: + timeout = pool_exists_options.timeout + client_request_id = None + if pool_exists_options is not None: + client_request_id = pool_exists_options.client_request_id + return_client_request_id = None + if pool_exists_options is not None: + return_client_request_id = pool_exists_options.return_client_request_id + ocp_date = None + if pool_exists_options is not None: + ocp_date = pool_exists_options.ocp_date + if_match = None + if pool_exists_options is not None: + if_match = pool_exists_options.if_match + if_none_match = None + if pool_exists_options is not None: + if_none_match = pool_exists_options.if_none_match + if_modified_since = None + if pool_exists_options is not None: + if_modified_since = pool_exists_options.if_modified_since + if_unmodified_since = None + if pool_exists_options is not None: + if_unmodified_since = pool_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 
'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/pools/{poolId}'} + + def get( + self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified pool. + + :param pool_id: The ID of the pool to get. + :type pool_id: str + :param pool_get_options: Additional parameters for the operation + :type pool_get_options: ~azure.batch.models.PoolGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudPool or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudPool or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if pool_get_options is not None: + select = pool_get_options.select + expand = None + if pool_get_options is not None: + expand = pool_get_options.expand + timeout = None + if pool_get_options is not None: + timeout = pool_get_options.timeout + client_request_id = None + if pool_get_options is not None: + client_request_id = pool_get_options.client_request_id + return_client_request_id = None + if pool_get_options is not None: + return_client_request_id = pool_get_options.return_client_request_id + ocp_date = None + if pool_get_options is not None: + ocp_date = pool_get_options.ocp_date + if_match = None + if pool_get_options is not None: + if_match = pool_get_options.if_match + if_none_match = None + if pool_get_options is not None: + if_none_match = pool_get_options.if_none_match + if_modified_since = None + if pool_get_options is not None: + if_modified_since = pool_get_options.if_modified_since + if_unmodified_since = None + if pool_get_options is not None: + if_unmodified_since = pool_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudPool', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}'} + + def patch( + self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified pool. + + This only replaces the pool properties specified in the request. For + example, if the pool has a start task associated with it, and a request + does not specify a start task element, then the pool keeps the existing + start task. + + :param pool_id: The ID of the pool to update. + :type pool_id: str + :param pool_patch_parameter: The parameters for the request. + :type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter + :param pool_patch_options: Additional parameters for the operation + :type pool_patch_options: ~azure.batch.models.PoolPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_patch_options is not None: + timeout = pool_patch_options.timeout + client_request_id = None + if pool_patch_options is not None: + client_request_id = pool_patch_options.client_request_id + return_client_request_id = None + if pool_patch_options is not None: + return_client_request_id = pool_patch_options.return_client_request_id + ocp_date = None + if pool_patch_options is not None: + ocp_date = pool_patch_options.ocp_date + if_match = None + if pool_patch_options is not None: + if_match = pool_patch_options.if_match + if_none_match = None + if pool_patch_options is not None: + if_none_match = pool_patch_options.if_none_match + if_modified_since = None + if pool_patch_options is not None: + if_modified_since = pool_patch_options.if_modified_since + if_unmodified_since = None + if pool_patch_options is not None: + if_unmodified_since = pool_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not 
None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/pools/{poolId}'} + + def disable_auto_scale( + self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + 
"""Disables automatic scaling for a pool. + + :param pool_id: The ID of the pool on which to disable automatic + scaling. + :type pool_id: str + :param pool_disable_auto_scale_options: Additional parameters for the + operation + :type pool_disable_auto_scale_options: + ~azure.batch.models.PoolDisableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_disable_auto_scale_options is not None: + timeout = pool_disable_auto_scale_options.timeout + client_request_id = None + if pool_disable_auto_scale_options is not None: + client_request_id = pool_disable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_disable_auto_scale_options is not None: + return_client_request_id = pool_disable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_disable_auto_scale_options is not None: + ocp_date = pool_disable_auto_scale_options.ocp_date + + # Construct URL + url = self.disable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) 
+ if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'} + + def enable_auto_scale( + self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Enables automatic scaling for a pool. + + You cannot enable automatic scaling on a pool if a resize operation is + in progress on the pool. If automatic scaling of the pool is currently + disabled, you must specify a valid autoscale formula as part of the + request. If automatic scaling of the pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You + cannot call this API for the same pool more than once every 30 seconds. 
+ + :param pool_id: The ID of the pool on which to enable automatic + scaling. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + compute nodes in the pool. The formula is checked for validity before + it is applied to the pool. If the formula is not valid, the Batch + service rejects the request with detailed error information. For more + information about specifying this formula, see Automatically scale + compute nodes in an Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + The default value is 15 minutes. The minimum and maximum value are 5 + minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the + request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). If you + specify a new interval, then the existing autoscale evaluation + schedule will be stopped and a new autoscale evaluation schedule will + be started, with its starting time being the time when this request + was issued. + :type auto_scale_evaluation_interval: timedelta + :param pool_enable_auto_scale_options: Additional parameters for the + operation + :type pool_enable_auto_scale_options: + ~azure.batch.models.PoolEnableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_enable_auto_scale_options is not None: + timeout = pool_enable_auto_scale_options.timeout + client_request_id = None + if pool_enable_auto_scale_options is not None: + client_request_id = pool_enable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_enable_auto_scale_options is not None: + return_client_request_id = pool_enable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_enable_auto_scale_options is not None: + ocp_date = pool_enable_auto_scale_options.ocp_date + if_match = None + if pool_enable_auto_scale_options is not None: + if_match = pool_enable_auto_scale_options.if_match + if_none_match = None + if pool_enable_auto_scale_options is not None: + if_none_match = pool_enable_auto_scale_options.if_none_match + if_modified_since = None + if pool_enable_auto_scale_options is not None: + if_modified_since = pool_enable_auto_scale_options.if_modified_since + if_unmodified_since = None + if pool_enable_auto_scale_options is not None: + if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since + pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval) + + # Construct URL + url = self.enable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'} + + def evaluate_auto_scale( + self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the result of evaluating an automatic scaling formula on the pool. + + This API is primarily for validating an autoscale formula, as it simply + returns the result without applying the formula to the pool. The pool + must have auto scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the pool on which to evaluate the automatic + scaling formula. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to + the pool, 'Enable automatic scaling on a pool'. For more information + about specifying this formula, see Automatically scale compute nodes + in an Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param pool_evaluate_auto_scale_options: Additional parameters for the + operation + :type pool_evaluate_auto_scale_options: + ~azure.batch.models.PoolEvaluateAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: AutoScaleRun or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.AutoScaleRun or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_evaluate_auto_scale_options is not None: + timeout = pool_evaluate_auto_scale_options.timeout + client_request_id = None + if pool_evaluate_auto_scale_options is not None: + client_request_id = pool_evaluate_auto_scale_options.client_request_id + return_client_request_id = None + if pool_evaluate_auto_scale_options is not None: + return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id + ocp_date = None + if pool_evaluate_auto_scale_options is not None: + ocp_date = pool_evaluate_auto_scale_options.ocp_date + pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula) + + # Construct URL + url = self.evaluate_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 
'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('AutoScaleRun', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'} + + def resize( + self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Changes the number of compute nodes that are assigned to a pool. + + You can only resize a pool when its allocation state is steady. If the + pool is already resizing, the request fails with status code 409. When + you resize a pool, the pool's allocation state changes from steady to + resizing. You cannot resize pools which are configured for automatic + scaling. If you try to do this, the Batch service returns an error 409. 
+ If you resize a pool downwards, the Batch service chooses which nodes + to remove. To remove specific nodes, use the pool remove nodes API + instead. + + :param pool_id: The ID of the pool to resize. + :type pool_id: str + :param pool_resize_parameter: The parameters for the request. + :type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter + :param pool_resize_options: Additional parameters for the operation + :type pool_resize_options: ~azure.batch.models.PoolResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_resize_options is not None: + timeout = pool_resize_options.timeout + client_request_id = None + if pool_resize_options is not None: + client_request_id = pool_resize_options.client_request_id + return_client_request_id = None + if pool_resize_options is not None: + return_client_request_id = pool_resize_options.return_client_request_id + ocp_date = None + if pool_resize_options is not None: + ocp_date = pool_resize_options.ocp_date + if_match = None + if pool_resize_options is not None: + if_match = pool_resize_options.if_match + if_none_match = None + if pool_resize_options is not None: + if_none_match = pool_resize_options.if_none_match + if_modified_since = None + if pool_resize_options is not None: + if_modified_since = pool_resize_options.if_modified_since + if_unmodified_since = None + if pool_resize_options is not None: + if_unmodified_since = pool_resize_options.if_unmodified_since + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 
'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_resize_parameter, 'PoolResizeParameter') + + # 
Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + resize.metadata = {'url': '/pools/{poolId}/resize'} + + def stop_resize( + self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Stops an ongoing resize operation on the pool. + + This does not restore the pool to its previous state before the resize + operation: it only stops any further changes being made, and the pool + maintains its current state. After stopping, the pool stabilizes at the + number of nodes it was at when the stop operation was done. During the + stop operation, the pool allocation state changes first to stopping and + then to steady. A resize operation need not be an explicit resize pool + request; this API can also be used to halt the initial sizing of the + pool when it is created. + + :param pool_id: The ID of the pool whose resizing you want to stop. + :type pool_id: str + :param pool_stop_resize_options: Additional parameters for the + operation + :type pool_stop_resize_options: + ~azure.batch.models.PoolStopResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_stop_resize_options is not None: + timeout = pool_stop_resize_options.timeout + client_request_id = None + if pool_stop_resize_options is not None: + client_request_id = pool_stop_resize_options.client_request_id + return_client_request_id = None + if pool_stop_resize_options is not None: + return_client_request_id = pool_stop_resize_options.return_client_request_id + ocp_date = None + if pool_stop_resize_options is not None: + ocp_date = pool_stop_resize_options.ocp_date + if_match = None + if pool_stop_resize_options is not None: + if_match = pool_stop_resize_options.if_match + if_none_match = None + if pool_stop_resize_options is not None: + if_none_match = pool_stop_resize_options.if_none_match + if_modified_since = None + if pool_stop_resize_options is not None: + if_modified_since = pool_stop_resize_options.if_modified_since + if_unmodified_since = None + if pool_stop_resize_options is not None: + if_unmodified_since = pool_stop_resize_options.if_unmodified_since + + # Construct URL + url = self.stop_resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is 
not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'} + + def update_properties( + self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified pool. 
+ + This fully replaces all the updatable properties of the pool. For + example, if the pool has a start task associated with it and if start + task is not specified with this request, then the Batch service will + remove the existing start task. + + :param pool_id: The ID of the pool to update. + :type pool_id: str + :param pool_update_properties_parameter: The parameters for the + request. + :type pool_update_properties_parameter: + ~azure.batch.models.PoolUpdatePropertiesParameter + :param pool_update_properties_options: Additional parameters for the + operation + :type pool_update_properties_options: + ~azure.batch.models.PoolUpdatePropertiesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_update_properties_options is not None: + timeout = pool_update_properties_options.timeout + client_request_id = None + if pool_update_properties_options is not None: + client_request_id = pool_update_properties_options.client_request_id + return_client_request_id = None + if pool_update_properties_options is not None: + return_client_request_id = pool_update_properties_options.return_client_request_id + ocp_date = None + if pool_update_properties_options is not None: + ocp_date = pool_update_properties_options.ocp_date + + # Construct URL + url = self.update_properties.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} + + def remove_nodes( + self, pool_id, node_remove_parameter, 
pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): + """Removes compute nodes from the specified pool. + + This operation can only run when the allocation state of the pool is + steady. When this operation runs, the allocation state changes from + steady to resizing. + + :param pool_id: The ID of the pool from which you want to remove + nodes. + :type pool_id: str + :param node_remove_parameter: The parameters for the request. + :type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter + :param pool_remove_nodes_options: Additional parameters for the + operation + :type pool_remove_nodes_options: + ~azure.batch.models.PoolRemoveNodesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_remove_nodes_options is not None: + timeout = pool_remove_nodes_options.timeout + client_request_id = None + if pool_remove_nodes_options is not None: + client_request_id = pool_remove_nodes_options.client_request_id + return_client_request_id = None + if pool_remove_nodes_options is not None: + return_client_request_id = pool_remove_nodes_options.return_client_request_id + ocp_date = None + if pool_remove_nodes_options is not None: + ocp_date = pool_remove_nodes_options.ocp_date + if_match = None + if pool_remove_nodes_options is not None: + if_match = pool_remove_nodes_options.if_match + if_none_match = None + if pool_remove_nodes_options is not None: + if_none_match = pool_remove_nodes_options.if_none_match + if_modified_since = None + if pool_remove_nodes_options is not None: + if_modified_since = pool_remove_nodes_options.if_modified_since + if_unmodified_since = None + if 
pool_remove_nodes_options is not None: + if_unmodified_since = pool_remove_nodes_options.if_unmodified_since + + # Construct URL + url = self.remove_nodes.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'} diff --git a/azext/generated/sdk/batch/v2018_08_01/operations/task_operations.py b/azext/generated/sdk/batch/v2018_08_01/operations/task_operations.py new file mode 100644 index 00000000..fba18319 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/operations/task_operations.py @@ -0,0 +1,1027 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class TaskOperations(object): + """TaskOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. 
+ :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-08-01.7.0" + + self.config = config + + def add( + self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a task to the specified job. + + The maximum lifetime of a task from addition to completion is 180 days. + If a task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the job to which the task is to be added. + :type job_id: str + :param task: The task to be added. + :type task: ~azure.batch.models.TaskAddParameter + :param task_add_options: Additional parameters for the operation + :type task_add_options: ~azure.batch.models.TaskAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_options is not None: + timeout = task_add_options.timeout + client_request_id = None + if task_add_options is not None: + client_request_id = task_add_options.client_request_id + return_client_request_id = None + if task_add_options is not None: + return_client_request_id = task_add_options.return_client_request_id + ocp_date = None + if task_add_options is not None: + ocp_date = task_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task, 'TaskAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs/{jobId}/tasks'} + + def list( + self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the tasks that are associated with the specified job. + + For multi-instance tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the job. + :type job_id: str + :param task_list_options: Additional parameters for the operation + :type task_list_options: ~azure.batch.models.TaskListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudTask + :rtype: + ~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask] + :raises: + :class:`BatchErrorException` + """ + filter = None + if task_list_options is not None: + filter = task_list_options.filter + select = None + if task_list_options is not None: + select = task_list_options.select + expand = None + if task_list_options is not None: + expand = task_list_options.expand + max_results = None + if task_list_options is not None: + max_results = task_list_options.max_results + timeout = None + if task_list_options is not None: + timeout = task_list_options.timeout + client_request_id = None + if task_list_options is not None: + client_request_id = task_list_options.client_request_id + return_client_request_id = None + if task_list_options is not None: + return_client_request_id = task_list_options.return_client_request_id + ocp_date = None + if task_list_options is not None: + ocp_date = task_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout 
is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs/{jobId}/tasks'} + + def add_collection( + self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a collection of tasks to the specified job. + + Note that each task must have a unique ID. 
The Batch service may not + return the results for each task in the same order the tasks were + submitted in this request. If the server times out or the connection is + closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the + request. Note that it is up to the user to correctly handle failures + when re-issuing a request. For example, you should use the same task + IDs during a retry so that if the prior operation succeeded, the retry + will not create extra tasks unexpectedly. If the response contains any + tasks which failed to add, a client can retry the request. In a retry, + it is most efficient to resubmit only tasks that failed to add, and to + omit tasks that were successfully added on the first attempt. The + maximum lifetime of a task from addition to completion is 180 days. If + a task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the job to which the task collection is to be + added. + :type job_id: str + :param value: The collection of tasks to add. The maximum count of + tasks is 100. The total serialized size of this collection must be + less than 1MB. If it is greater than 1MB (for example if each task has + 100's of resource files or environment variables), the request will + fail with code 'RequestBodyTooLarge' and should be retried again with + fewer tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + :param task_add_collection_options: Additional parameters for the + operation + :type task_add_collection_options: + ~azure.batch.models.TaskAddCollectionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskAddCollectionResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskAddCollectionResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_collection_options is not None: + timeout = task_add_collection_options.timeout + client_request_id = None + if task_add_collection_options is not None: + client_request_id = task_add_collection_options.client_request_id + return_client_request_id = None + if task_add_collection_options is not None: + return_client_request_id = task_add_collection_options.return_client_request_id + ocp_date = None + if task_add_collection_options is not None: + ocp_date = task_add_collection_options.ocp_date + task_collection = models.TaskAddCollectionParameter(value=value) + + # Construct URL + url = self.add_collection.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskAddCollectionResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} + + def delete( + self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a task from the specified job. + + When a task is deleted, all of the files in its directory on the + compute node where it ran are also deleted (regardless of the retention + time). For multi-instance tasks, the delete task operation applies + synchronously to the primary task; subtasks and their files are then + deleted asynchronously in the background. + + :param job_id: The ID of the job from which to delete the task. + :type job_id: str + :param task_id: The ID of the task to delete. 
+ :type task_id: str + :param task_delete_options: Additional parameters for the operation + :type task_delete_options: ~azure.batch.models.TaskDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_delete_options is not None: + timeout = task_delete_options.timeout + client_request_id = None + if task_delete_options is not None: + client_request_id = task_delete_options.client_request_id + return_client_request_id = None + if task_delete_options is not None: + return_client_request_id = task_delete_options.return_client_request_id + ocp_date = None + if task_delete_options is not None: + ocp_date = task_delete_options.ocp_date + if_match = None + if task_delete_options is not None: + if_match = task_delete_options.if_match + if_none_match = None + if task_delete_options is not None: + if_none_match = task_delete_options.if_none_match + if_modified_since = None + if task_delete_options is not None: + if_modified_since = task_delete_options.if_modified_since + if_unmodified_since = None + if task_delete_options is not None: + if_unmodified_since = task_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + 
delete.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def get( + self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified task. + + For multi-instance tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task to get information about. + :type task_id: str + :param task_get_options: Additional parameters for the operation + :type task_get_options: ~azure.batch.models.TaskGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudTask or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTask or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_get_options is not None: + select = task_get_options.select + expand = None + if task_get_options is not None: + expand = task_get_options.expand + timeout = None + if task_get_options is not None: + timeout = task_get_options.timeout + client_request_id = None + if task_get_options is not None: + client_request_id = task_get_options.client_request_id + return_client_request_id = None + if task_get_options is not None: + return_client_request_id = task_get_options.return_client_request_id + ocp_date = None + if task_get_options is not None: + ocp_date = task_get_options.ocp_date + if_match = None + if task_get_options is not None: + if_match = task_get_options.if_match + if_none_match = None + if task_get_options is not None: + if_none_match = task_get_options.if_none_match + if_modified_since = None + if task_get_options is not None: + 
if_modified_since = task_get_options.if_modified_since + if_unmodified_since = None + if task_get_options is not None: + if_unmodified_since = task_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTask', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def update( + self, job_id, task_id, constraints=None, task_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified task. + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to update. + :type task_id: str + :param constraints: Constraints that apply to this task. If omitted, + the task is given the default constraints. For multi-instance tasks, + updating the retention time applies only to the primary task and not + subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param task_update_options: Additional parameters for the operation + :type task_update_options: ~azure.batch.models.TaskUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_update_options is not None: + timeout = task_update_options.timeout + client_request_id = None + if task_update_options is not None: + client_request_id = task_update_options.client_request_id + return_client_request_id = None + if task_update_options is not None: + return_client_request_id = task_update_options.return_client_request_id + ocp_date = None + if task_update_options is not None: + ocp_date = task_update_options.ocp_date + if_match = None + if task_update_options is not None: + if_match = task_update_options.if_match + if_none_match = None + if task_update_options is not None: + if_none_match = task_update_options.if_none_match + if_modified_since = None + if task_update_options is not None: + if_modified_since = task_update_options.if_modified_since + if_unmodified_since = None + if task_update_options is not None: + if_unmodified_since = task_update_options.if_unmodified_since + task_update_parameter = models.TaskUpdateParameter(constraints=constraints) + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters 
= {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def list_subtasks( + self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the subtasks that are associated with the specified + multi-instance task. + + If the task is not a multi-instance task then this returns an empty + collection. + + :param job_id: The ID of the job. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param task_list_subtasks_options: Additional parameters for the + operation + :type task_list_subtasks_options: + ~azure.batch.models.TaskListSubtasksOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTaskListSubtasksResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_list_subtasks_options is not None: + select = task_list_subtasks_options.select + timeout = None + if task_list_subtasks_options is not None: + timeout = task_list_subtasks_options.timeout + client_request_id = None + if task_list_subtasks_options is not None: + client_request_id = task_list_subtasks_options.client_request_id + return_client_request_id = None + if task_list_subtasks_options is not None: + return_client_request_id = task_list_subtasks_options.return_client_request_id + ocp_date = None + if task_list_subtasks_options is not None: + ocp_date = task_list_subtasks_options.ocp_date + + # Construct URL + url = self.list_subtasks.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTaskListSubtasksResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + list_subtasks.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'} + + def terminate( + self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified task. + + When the task has been terminated, it moves to the completed state. For + multi-instance tasks, the terminate task operation applies + synchronously to the primary task; subtasks are then terminated + asynchronously in the background. + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to terminate. 
+ :type task_id: str + :param task_terminate_options: Additional parameters for the operation + :type task_terminate_options: ~azure.batch.models.TaskTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_terminate_options is not None: + timeout = task_terminate_options.timeout + client_request_id = None + if task_terminate_options is not None: + client_request_id = task_terminate_options.client_request_id + return_client_request_id = None + if task_terminate_options is not None: + return_client_request_id = task_terminate_options.return_client_request_id + ocp_date = None + if task_terminate_options is not None: + ocp_date = task_terminate_options.ocp_date + if_match = None + if task_terminate_options is not None: + if_match = task_terminate_options.if_match + if_none_match = None + if task_terminate_options is not None: + if_none_match = task_terminate_options.if_none_match + if_modified_since = None + if task_terminate_options is not None: + if_modified_since = task_terminate_options.if_modified_since + if_unmodified_since = None + if task_terminate_options is not None: + if_unmodified_since = task_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 
'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/terminate'} + + def reactivate( + self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config): + """Reactivates a task, allowing it to run again even if its retry count + has been exhausted. + + Reactivation makes a task eligible to be retried again up to its + maximum retry count. The task's state is changed to active. As the task + is no longer in the completed state, any previous exit code or failure + information is no longer available after reactivation. Each time a task + is reactivated, its retry count is reset to 0. Reactivation will fail + for tasks that are not completed or that previously completed + successfully (with an exit code of 0). Additionally, it will fail if + the job has completed (or is terminating or deleting). + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to reactivate. + :type task_id: str + :param task_reactivate_options: Additional parameters for the + operation + :type task_reactivate_options: + ~azure.batch.models.TaskReactivateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_reactivate_options is not None: + timeout = task_reactivate_options.timeout + client_request_id = None + if task_reactivate_options is not None: + client_request_id = task_reactivate_options.client_request_id + return_client_request_id = None + if task_reactivate_options is not None: + return_client_request_id = task_reactivate_options.return_client_request_id + ocp_date = None + if task_reactivate_options is not None: + ocp_date = task_reactivate_options.ocp_date + if_match = None + if task_reactivate_options is not None: + if_match = task_reactivate_options.if_match + if_none_match = None + if task_reactivate_options is not None: + if_none_match = task_reactivate_options.if_none_match + if_modified_since = None + if task_reactivate_options is not None: + if_modified_since = task_reactivate_options.if_modified_since + if_unmodified_since = None + if task_reactivate_options is not None: + if_unmodified_since = task_reactivate_options.if_unmodified_since + + # Construct URL + url = self.reactivate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reactivate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/reactivate'} diff --git a/azext/generated/sdk/batch/v2018_08_01/version.py b/azext/generated/sdk/batch/v2018_08_01/version.py new file mode 100644 index 
00000000..f24f038f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_08_01/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +VERSION = "2018-08-01.7.0" + diff --git a/azext/generated/sdk/batch/v2018_12_01/__init__.py b/azext/generated/sdk/batch/v2018_12_01/__init__.py new file mode 100644 index 00000000..f27e0cb6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .batch_service_client import BatchServiceClient +from .version import VERSION + +__all__ = ['BatchServiceClient'] + +__version__ = VERSION + diff --git a/azext/generated/sdk/batch/v2018_12_01/batch_service_client.py b/azext/generated/sdk/batch/v2018_12_01/batch_service_client.py new file mode 100644 index 00000000..9a09b4bd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/batch_service_client.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer +from msrestazure import AzureConfiguration +from .version import VERSION +from .operations.application_operations import ApplicationOperations +from .operations.pool_operations import PoolOperations +from .operations.account_operations import AccountOperations +from .operations.job_operations import JobOperations +from .operations.certificate_operations import CertificateOperations +from .operations.file_operations import FileOperations +from .operations.job_schedule_operations import JobScheduleOperations +from .operations.task_operations import TaskOperations +from .operations.compute_node_operations import ComputeNodeOperations +from . import models + + +class BatchServiceClientConfiguration(AzureConfiguration): + """Configuration for BatchServiceClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' + + super(BatchServiceClientConfiguration, self).__init__(base_url) + + self.add_user_agent('azure-batch/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.batch_url = batch_url + + +class BatchServiceClient(SDKClient): + """A client for issuing REST requests to the Azure Batch service. + + :ivar config: Configuration for client. + :vartype config: BatchServiceClientConfiguration + + :ivar application: Application operations + :vartype application: azure.batch.operations.ApplicationOperations + :ivar pool: Pool operations + :vartype pool: azure.batch.operations.PoolOperations + :ivar account: Account operations + :vartype account: azure.batch.operations.AccountOperations + :ivar job: Job operations + :vartype job: azure.batch.operations.JobOperations + :ivar certificate: Certificate operations + :vartype certificate: azure.batch.operations.CertificateOperations + :ivar file: File operations + :vartype file: azure.batch.operations.FileOperations + :ivar job_schedule: JobSchedule operations + :vartype job_schedule: azure.batch.operations.JobScheduleOperations + :ivar task: Task operations + :vartype task: azure.batch.operations.TaskOperations + :ivar compute_node: ComputeNode operations + :vartype compute_node: azure.batch.operations.ComputeNodeOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + self.config = BatchServiceClientConfiguration(credentials, batch_url) + super(BatchServiceClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2018-12-01.8.0' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.application = ApplicationOperations( + self._client, self.config, self._serialize, self._deserialize) + self.pool = PoolOperations( + self._client, self.config, self._serialize, self._deserialize) + self.account = AccountOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job = JobOperations( + self._client, self.config, self._serialize, self._deserialize) + self.certificate = CertificateOperations( + self._client, self.config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job_schedule = JobScheduleOperations( + self._client, self.config, self._serialize, self._deserialize) + self.task = TaskOperations( + self._client, self.config, self._serialize, self._deserialize) + self.compute_node = ComputeNodeOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/__init__.py b/azext/generated/sdk/batch/v2018_12_01/models/__init__.py new file mode 100644 index 00000000..ef769e49 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/__init__.py @@ -0,0 +1,722 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from .pool_usage_metrics_py3 import PoolUsageMetrics + from .image_reference_py3 import ImageReference + from .node_agent_sku_py3 import NodeAgentSku + from .authentication_token_settings_py3 import AuthenticationTokenSettings + from .usage_statistics_py3 import UsageStatistics + from .resource_statistics_py3 import ResourceStatistics + from .pool_statistics_py3 import PoolStatistics + from .job_statistics_py3 import JobStatistics + from .name_value_pair_py3 import NameValuePair + from .delete_certificate_error_py3 import DeleteCertificateError + from .certificate_py3 import Certificate + from .application_package_reference_py3 import ApplicationPackageReference + from .application_summary_py3 import ApplicationSummary + from .certificate_add_parameter_py3 import CertificateAddParameter + from .file_properties_py3 import FileProperties + from .node_file_py3 import NodeFile + from .schedule_py3 import Schedule + from .job_constraints_py3 import JobConstraints + from .job_network_configuration_py3 import JobNetworkConfiguration + from .container_registry_py3 import ContainerRegistry + from .task_container_settings_py3 import TaskContainerSettings + from .resource_file_py3 import ResourceFile + from .environment_setting_py3 import EnvironmentSetting + from .exit_options_py3 import ExitOptions + from .exit_code_mapping_py3 import ExitCodeMapping + from .exit_code_range_mapping_py3 import ExitCodeRangeMapping + from .exit_conditions_py3 import ExitConditions + from .auto_user_specification_py3 import AutoUserSpecification + from .user_identity_py3 import UserIdentity + from .linux_user_configuration_py3 import LinuxUserConfiguration + from .windows_user_configuration_py3 import WindowsUserConfiguration + from .user_account_py3 import UserAccount + from .task_constraints_py3 import TaskConstraints 
+ from .output_file_blob_container_destination_py3 import OutputFileBlobContainerDestination + from .output_file_destination_py3 import OutputFileDestination + from .output_file_upload_options_py3 import OutputFileUploadOptions + from .output_file_py3 import OutputFile + from .job_manager_task_py3 import JobManagerTask + from .job_preparation_task_py3 import JobPreparationTask + from .job_release_task_py3 import JobReleaseTask + from .task_scheduling_policy_py3 import TaskSchedulingPolicy + from .start_task_py3 import StartTask + from .certificate_reference_py3 import CertificateReference + from .metadata_item_py3 import MetadataItem + from .cloud_service_configuration_py3 import CloudServiceConfiguration + from .windows_configuration_py3 import WindowsConfiguration + from .data_disk_py3 import DataDisk + from .container_configuration_py3 import ContainerConfiguration + from .virtual_machine_configuration_py3 import VirtualMachineConfiguration + from .network_security_group_rule_py3 import NetworkSecurityGroupRule + from .inbound_nat_pool_py3 import InboundNATPool + from .pool_endpoint_configuration_py3 import PoolEndpointConfiguration + from .network_configuration_py3 import NetworkConfiguration + from .pool_specification_py3 import PoolSpecification + from .auto_pool_specification_py3 import AutoPoolSpecification + from .pool_information_py3 import PoolInformation + from .job_specification_py3 import JobSpecification + from .recent_job_py3 import RecentJob + from .job_schedule_execution_information_py3 import JobScheduleExecutionInformation + from .job_schedule_statistics_py3 import JobScheduleStatistics + from .cloud_job_schedule_py3 import CloudJobSchedule + from .job_schedule_add_parameter_py3 import JobScheduleAddParameter + from .job_scheduling_error_py3 import JobSchedulingError + from .job_execution_information_py3 import JobExecutionInformation + from .cloud_job_py3 import CloudJob + from .job_add_parameter_py3 import JobAddParameter + from 
.task_container_execution_information_py3 import TaskContainerExecutionInformation + from .task_failure_information_py3 import TaskFailureInformation + from .job_preparation_task_execution_information_py3 import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information_py3 import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information_py3 import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts_py3 import TaskCounts + from .auto_scale_run_error_py3 import AutoScaleRunError + from .auto_scale_run_py3 import AutoScaleRun + from .resize_error_py3 import ResizeError + from .cloud_pool_py3 import CloudPool + from .pool_add_parameter_py3 import PoolAddParameter + from .affinity_information_py3 import AffinityInformation + from .task_execution_information_py3 import TaskExecutionInformation + from .compute_node_information_py3 import ComputeNodeInformation + from .node_agent_information_py3 import NodeAgentInformation + from .multi_instance_settings_py3 import MultiInstanceSettings + from .task_statistics_py3 import TaskStatistics + from .task_id_range_py3 import TaskIdRange + from .task_dependencies_py3 import TaskDependencies + from .cloud_task_py3 import CloudTask + from .task_add_parameter_py3 import TaskAddParameter + from .task_add_collection_parameter_py3 import TaskAddCollectionParameter + from .error_message_py3 import ErrorMessage + from .batch_error_detail_py3 import BatchErrorDetail + from .batch_error_py3 import BatchError, BatchErrorException + from .task_add_result_py3 import TaskAddResult + from .task_add_collection_result_py3 import TaskAddCollectionResult + from .subtask_information_py3 import SubtaskInformation + from .cloud_task_list_subtasks_result_py3 import CloudTaskListSubtasksResult + from .task_information_py3 import TaskInformation + from .start_task_information_py3 import StartTaskInformation + from .compute_node_error_py3 import ComputeNodeError + from 
.inbound_endpoint_py3 import InboundEndpoint + from .compute_node_endpoint_configuration_py3 import ComputeNodeEndpointConfiguration + from .compute_node_py3 import ComputeNode + from .compute_node_user_py3 import ComputeNodeUser + from .compute_node_get_remote_login_settings_result_py3 import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter_py3 import JobSchedulePatchParameter + from .job_schedule_update_parameter_py3 import JobScheduleUpdateParameter + from .job_disable_parameter_py3 import JobDisableParameter + from .job_terminate_parameter_py3 import JobTerminateParameter + from .job_patch_parameter_py3 import JobPatchParameter + from .job_update_parameter_py3 import JobUpdateParameter + from .pool_enable_auto_scale_parameter_py3 import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter_py3 import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter_py3 import PoolResizeParameter + from .pool_update_properties_parameter_py3 import PoolUpdatePropertiesParameter + from .pool_patch_parameter_py3 import PoolPatchParameter + from .task_update_parameter_py3 import TaskUpdateParameter + from .node_update_user_parameter_py3 import NodeUpdateUserParameter + from .node_reboot_parameter_py3 import NodeRebootParameter + from .node_reimage_parameter_py3 import NodeReimageParameter + from .node_disable_scheduling_parameter_py3 import NodeDisableSchedulingParameter + from .node_remove_parameter_py3 import NodeRemoveParameter + from .upload_batch_service_logs_configuration_py3 import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result_py3 import UploadBatchServiceLogsResult + from .node_counts_py3 import NodeCounts + from .pool_node_counts_py3 import PoolNodeCounts + from .application_list_options_py3 import ApplicationListOptions + from .application_get_options_py3 import ApplicationGetOptions + from .pool_list_usage_metrics_options_py3 import PoolListUsageMetricsOptions + from 
.pool_get_all_lifetime_statistics_options_py3 import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options_py3 import PoolAddOptions + from .pool_list_options_py3 import PoolListOptions + from .pool_delete_options_py3 import PoolDeleteOptions + from .pool_exists_options_py3 import PoolExistsOptions + from .pool_get_options_py3 import PoolGetOptions + from .pool_patch_options_py3 import PoolPatchOptions + from .pool_disable_auto_scale_options_py3 import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options_py3 import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options_py3 import PoolEvaluateAutoScaleOptions + from .pool_resize_options_py3 import PoolResizeOptions + from .pool_stop_resize_options_py3 import PoolStopResizeOptions + from .pool_update_properties_options_py3 import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options_py3 import PoolRemoveNodesOptions + from .account_list_node_agent_skus_options_py3 import AccountListNodeAgentSkusOptions + from .account_list_pool_node_counts_options_py3 import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options_py3 import JobGetAllLifetimeStatisticsOptions + from .job_delete_options_py3 import JobDeleteOptions + from .job_get_options_py3 import JobGetOptions + from .job_patch_options_py3 import JobPatchOptions + from .job_update_options_py3 import JobUpdateOptions + from .job_disable_options_py3 import JobDisableOptions + from .job_enable_options_py3 import JobEnableOptions + from .job_terminate_options_py3 import JobTerminateOptions + from .job_add_options_py3 import JobAddOptions + from .job_list_options_py3 import JobListOptions + from .job_list_from_job_schedule_options_py3 import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options_py3 import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options_py3 import JobGetTaskCountsOptions + from .certificate_add_options_py3 import 
CertificateAddOptions + from .certificate_list_options_py3 import CertificateListOptions + from .certificate_cancel_deletion_options_py3 import CertificateCancelDeletionOptions + from .certificate_delete_options_py3 import CertificateDeleteOptions + from .certificate_get_options_py3 import CertificateGetOptions + from .file_delete_from_task_options_py3 import FileDeleteFromTaskOptions + from .file_get_from_task_options_py3 import FileGetFromTaskOptions + from .file_get_properties_from_task_options_py3 import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options_py3 import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options_py3 import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options_py3 import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options_py3 import FileListFromTaskOptions + from .file_list_from_compute_node_options_py3 import FileListFromComputeNodeOptions + from .job_schedule_exists_options_py3 import JobScheduleExistsOptions + from .job_schedule_delete_options_py3 import JobScheduleDeleteOptions + from .job_schedule_get_options_py3 import JobScheduleGetOptions + from .job_schedule_patch_options_py3 import JobSchedulePatchOptions + from .job_schedule_update_options_py3 import JobScheduleUpdateOptions + from .job_schedule_disable_options_py3 import JobScheduleDisableOptions + from .job_schedule_enable_options_py3 import JobScheduleEnableOptions + from .job_schedule_terminate_options_py3 import JobScheduleTerminateOptions + from .job_schedule_add_options_py3 import JobScheduleAddOptions + from .job_schedule_list_options_py3 import JobScheduleListOptions + from .task_add_options_py3 import TaskAddOptions + from .task_list_options_py3 import TaskListOptions + from .task_add_collection_options_py3 import TaskAddCollectionOptions + from .task_delete_options_py3 import TaskDeleteOptions + from .task_get_options_py3 import TaskGetOptions + from 
.task_update_options_py3 import TaskUpdateOptions + from .task_list_subtasks_options_py3 import TaskListSubtasksOptions + from .task_terminate_options_py3 import TaskTerminateOptions + from .task_reactivate_options_py3 import TaskReactivateOptions + from .compute_node_add_user_options_py3 import ComputeNodeAddUserOptions + from .compute_node_delete_user_options_py3 import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options_py3 import ComputeNodeUpdateUserOptions + from .compute_node_get_options_py3 import ComputeNodeGetOptions + from .compute_node_reboot_options_py3 import ComputeNodeRebootOptions + from .compute_node_reimage_options_py3 import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options_py3 import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options_py3 import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options_py3 import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options_py3 import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options_py3 import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options_py3 import ComputeNodeListOptions +except (SyntaxError, ImportError): + from .pool_usage_metrics import PoolUsageMetrics + from .image_reference import ImageReference + from .node_agent_sku import NodeAgentSku + from .authentication_token_settings import AuthenticationTokenSettings + from .usage_statistics import UsageStatistics + from .resource_statistics import ResourceStatistics + from .pool_statistics import PoolStatistics + from .job_statistics import JobStatistics + from .name_value_pair import NameValuePair + from .delete_certificate_error import DeleteCertificateError + from .certificate import Certificate + from .application_package_reference import ApplicationPackageReference + from .application_summary import ApplicationSummary + from 
.certificate_add_parameter import CertificateAddParameter + from .file_properties import FileProperties + from .node_file import NodeFile + from .schedule import Schedule + from .job_constraints import JobConstraints + from .job_network_configuration import JobNetworkConfiguration + from .container_registry import ContainerRegistry + from .task_container_settings import TaskContainerSettings + from .resource_file import ResourceFile + from .environment_setting import EnvironmentSetting + from .exit_options import ExitOptions + from .exit_code_mapping import ExitCodeMapping + from .exit_code_range_mapping import ExitCodeRangeMapping + from .exit_conditions import ExitConditions + from .auto_user_specification import AutoUserSpecification + from .user_identity import UserIdentity + from .linux_user_configuration import LinuxUserConfiguration + from .windows_user_configuration import WindowsUserConfiguration + from .user_account import UserAccount + from .task_constraints import TaskConstraints + from .output_file_blob_container_destination import OutputFileBlobContainerDestination + from .output_file_destination import OutputFileDestination + from .output_file_upload_options import OutputFileUploadOptions + from .output_file import OutputFile + from .job_manager_task import JobManagerTask + from .job_preparation_task import JobPreparationTask + from .job_release_task import JobReleaseTask + from .task_scheduling_policy import TaskSchedulingPolicy + from .start_task import StartTask + from .certificate_reference import CertificateReference + from .metadata_item import MetadataItem + from .cloud_service_configuration import CloudServiceConfiguration + from .windows_configuration import WindowsConfiguration + from .data_disk import DataDisk + from .container_configuration import ContainerConfiguration + from .virtual_machine_configuration import VirtualMachineConfiguration + from .network_security_group_rule import NetworkSecurityGroupRule + from .inbound_nat_pool 
import InboundNATPool + from .pool_endpoint_configuration import PoolEndpointConfiguration + from .network_configuration import NetworkConfiguration + from .pool_specification import PoolSpecification + from .auto_pool_specification import AutoPoolSpecification + from .pool_information import PoolInformation + from .job_specification import JobSpecification + from .recent_job import RecentJob + from .job_schedule_execution_information import JobScheduleExecutionInformation + from .job_schedule_statistics import JobScheduleStatistics + from .cloud_job_schedule import CloudJobSchedule + from .job_schedule_add_parameter import JobScheduleAddParameter + from .job_scheduling_error import JobSchedulingError + from .job_execution_information import JobExecutionInformation + from .cloud_job import CloudJob + from .job_add_parameter import JobAddParameter + from .task_container_execution_information import TaskContainerExecutionInformation + from .task_failure_information import TaskFailureInformation + from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts import TaskCounts + from .auto_scale_run_error import AutoScaleRunError + from .auto_scale_run import AutoScaleRun + from .resize_error import ResizeError + from .cloud_pool import CloudPool + from .pool_add_parameter import PoolAddParameter + from .affinity_information import AffinityInformation + from .task_execution_information import TaskExecutionInformation + from .compute_node_information import ComputeNodeInformation + from .node_agent_information import NodeAgentInformation + from .multi_instance_settings import MultiInstanceSettings + from .task_statistics import TaskStatistics + from .task_id_range import TaskIdRange + from .task_dependencies 
import TaskDependencies + from .cloud_task import CloudTask + from .task_add_parameter import TaskAddParameter + from .task_add_collection_parameter import TaskAddCollectionParameter + from .error_message import ErrorMessage + from .batch_error_detail import BatchErrorDetail + from .batch_error import BatchError, BatchErrorException + from .task_add_result import TaskAddResult + from .task_add_collection_result import TaskAddCollectionResult + from .subtask_information import SubtaskInformation + from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult + from .task_information import TaskInformation + from .start_task_information import StartTaskInformation + from .compute_node_error import ComputeNodeError + from .inbound_endpoint import InboundEndpoint + from .compute_node_endpoint_configuration import ComputeNodeEndpointConfiguration + from .compute_node import ComputeNode + from .compute_node_user import ComputeNodeUser + from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter import JobSchedulePatchParameter + from .job_schedule_update_parameter import JobScheduleUpdateParameter + from .job_disable_parameter import JobDisableParameter + from .job_terminate_parameter import JobTerminateParameter + from .job_patch_parameter import JobPatchParameter + from .job_update_parameter import JobUpdateParameter + from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter import PoolResizeParameter + from .pool_update_properties_parameter import PoolUpdatePropertiesParameter + from .pool_patch_parameter import PoolPatchParameter + from .task_update_parameter import TaskUpdateParameter + from .node_update_user_parameter import NodeUpdateUserParameter + from .node_reboot_parameter import NodeRebootParameter + from .node_reimage_parameter import 
NodeReimageParameter + from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter + from .node_remove_parameter import NodeRemoveParameter + from .upload_batch_service_logs_configuration import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result import UploadBatchServiceLogsResult + from .node_counts import NodeCounts + from .pool_node_counts import PoolNodeCounts + from .application_list_options import ApplicationListOptions + from .application_get_options import ApplicationGetOptions + from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions + from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options import PoolAddOptions + from .pool_list_options import PoolListOptions + from .pool_delete_options import PoolDeleteOptions + from .pool_exists_options import PoolExistsOptions + from .pool_get_options import PoolGetOptions + from .pool_patch_options import PoolPatchOptions + from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions + from .pool_resize_options import PoolResizeOptions + from .pool_stop_resize_options import PoolStopResizeOptions + from .pool_update_properties_options import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options import PoolRemoveNodesOptions + from .account_list_node_agent_skus_options import AccountListNodeAgentSkusOptions + from .account_list_pool_node_counts_options import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions + from .job_delete_options import JobDeleteOptions + from .job_get_options import JobGetOptions + from .job_patch_options import JobPatchOptions + from .job_update_options import JobUpdateOptions + from .job_disable_options import JobDisableOptions + from 
.job_enable_options import JobEnableOptions + from .job_terminate_options import JobTerminateOptions + from .job_add_options import JobAddOptions + from .job_list_options import JobListOptions + from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options import JobGetTaskCountsOptions + from .certificate_add_options import CertificateAddOptions + from .certificate_list_options import CertificateListOptions + from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions + from .certificate_delete_options import CertificateDeleteOptions + from .certificate_get_options import CertificateGetOptions + from .file_delete_from_task_options import FileDeleteFromTaskOptions + from .file_get_from_task_options import FileGetFromTaskOptions + from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options import FileListFromTaskOptions + from .file_list_from_compute_node_options import FileListFromComputeNodeOptions + from .job_schedule_exists_options import JobScheduleExistsOptions + from .job_schedule_delete_options import JobScheduleDeleteOptions + from .job_schedule_get_options import JobScheduleGetOptions + from .job_schedule_patch_options import JobSchedulePatchOptions + from .job_schedule_update_options import JobScheduleUpdateOptions + from .job_schedule_disable_options import JobScheduleDisableOptions + from .job_schedule_enable_options import JobScheduleEnableOptions + from .job_schedule_terminate_options import JobScheduleTerminateOptions + from 
.job_schedule_add_options import JobScheduleAddOptions + from .job_schedule_list_options import JobScheduleListOptions + from .task_add_options import TaskAddOptions + from .task_list_options import TaskListOptions + from .task_add_collection_options import TaskAddCollectionOptions + from .task_delete_options import TaskDeleteOptions + from .task_get_options import TaskGetOptions + from .task_update_options import TaskUpdateOptions + from .task_list_subtasks_options import TaskListSubtasksOptions + from .task_terminate_options import TaskTerminateOptions + from .task_reactivate_options import TaskReactivateOptions + from .compute_node_add_user_options import ComputeNodeAddUserOptions + from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options import ComputeNodeUpdateUserOptions + from .compute_node_get_options import ComputeNodeGetOptions + from .compute_node_reboot_options import ComputeNodeRebootOptions + from .compute_node_reimage_options import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options import ComputeNodeListOptions +from .application_summary_paged import ApplicationSummaryPaged +from .pool_usage_metrics_paged import PoolUsageMetricsPaged +from .cloud_pool_paged import CloudPoolPaged +from .node_agent_sku_paged import NodeAgentSkuPaged +from .pool_node_counts_paged import PoolNodeCountsPaged +from .cloud_job_paged import CloudJobPaged +from .job_preparation_and_release_task_execution_information_paged import 
JobPreparationAndReleaseTaskExecutionInformationPaged +from .certificate_paged import CertificatePaged +from .node_file_paged import NodeFilePaged +from .cloud_job_schedule_paged import CloudJobSchedulePaged +from .cloud_task_paged import CloudTaskPaged +from .compute_node_paged import ComputeNodePaged +from .batch_service_client_enums import ( + OSType, + AccessScope, + CertificateState, + CertificateFormat, + JobAction, + DependencyAction, + AutoUserScope, + ElevationLevel, + LoginMode, + OutputFileUploadCondition, + ComputeNodeFillType, + CertificateStoreLocation, + CertificateVisibility, + CachingType, + StorageAccountType, + DynamicVNetAssignmentScope, + InboundEndpointProtocol, + NetworkSecurityGroupRuleAccess, + PoolLifetimeOption, + OnAllTasksComplete, + OnTaskFailure, + JobScheduleState, + ErrorCategory, + JobState, + JobPreparationTaskState, + TaskExecutionResult, + JobReleaseTaskState, + PoolState, + AllocationState, + TaskState, + TaskAddStatus, + SubtaskState, + StartTaskState, + ComputeNodeState, + SchedulingState, + DisableJobOption, + ComputeNodeDeallocationOption, + ComputeNodeRebootOption, + ComputeNodeReimageOption, + DisableComputeNodeSchedulingOption, +) + +__all__ = [ + 'PoolUsageMetrics', + 'ImageReference', + 'NodeAgentSku', + 'AuthenticationTokenSettings', + 'UsageStatistics', + 'ResourceStatistics', + 'PoolStatistics', + 'JobStatistics', + 'NameValuePair', + 'DeleteCertificateError', + 'Certificate', + 'ApplicationPackageReference', + 'ApplicationSummary', + 'CertificateAddParameter', + 'FileProperties', + 'NodeFile', + 'Schedule', + 'JobConstraints', + 'JobNetworkConfiguration', + 'ContainerRegistry', + 'TaskContainerSettings', + 'ResourceFile', + 'EnvironmentSetting', + 'ExitOptions', + 'ExitCodeMapping', + 'ExitCodeRangeMapping', + 'ExitConditions', + 'AutoUserSpecification', + 'UserIdentity', + 'LinuxUserConfiguration', + 'WindowsUserConfiguration', + 'UserAccount', + 'TaskConstraints', + 'OutputFileBlobContainerDestination', + 
'OutputFileDestination', + 'OutputFileUploadOptions', + 'OutputFile', + 'JobManagerTask', + 'JobPreparationTask', + 'JobReleaseTask', + 'TaskSchedulingPolicy', + 'StartTask', + 'CertificateReference', + 'MetadataItem', + 'CloudServiceConfiguration', + 'WindowsConfiguration', + 'DataDisk', + 'ContainerConfiguration', + 'VirtualMachineConfiguration', + 'NetworkSecurityGroupRule', + 'InboundNATPool', + 'PoolEndpointConfiguration', + 'NetworkConfiguration', + 'PoolSpecification', + 'AutoPoolSpecification', + 'PoolInformation', + 'JobSpecification', + 'RecentJob', + 'JobScheduleExecutionInformation', + 'JobScheduleStatistics', + 'CloudJobSchedule', + 'JobScheduleAddParameter', + 'JobSchedulingError', + 'JobExecutionInformation', + 'CloudJob', + 'JobAddParameter', + 'TaskContainerExecutionInformation', + 'TaskFailureInformation', + 'JobPreparationTaskExecutionInformation', + 'JobReleaseTaskExecutionInformation', + 'JobPreparationAndReleaseTaskExecutionInformation', + 'TaskCounts', + 'AutoScaleRunError', + 'AutoScaleRun', + 'ResizeError', + 'CloudPool', + 'PoolAddParameter', + 'AffinityInformation', + 'TaskExecutionInformation', + 'ComputeNodeInformation', + 'NodeAgentInformation', + 'MultiInstanceSettings', + 'TaskStatistics', + 'TaskIdRange', + 'TaskDependencies', + 'CloudTask', + 'TaskAddParameter', + 'TaskAddCollectionParameter', + 'ErrorMessage', + 'BatchErrorDetail', + 'BatchError', 'BatchErrorException', + 'TaskAddResult', + 'TaskAddCollectionResult', + 'SubtaskInformation', + 'CloudTaskListSubtasksResult', + 'TaskInformation', + 'StartTaskInformation', + 'ComputeNodeError', + 'InboundEndpoint', + 'ComputeNodeEndpointConfiguration', + 'ComputeNode', + 'ComputeNodeUser', + 'ComputeNodeGetRemoteLoginSettingsResult', + 'JobSchedulePatchParameter', + 'JobScheduleUpdateParameter', + 'JobDisableParameter', + 'JobTerminateParameter', + 'JobPatchParameter', + 'JobUpdateParameter', + 'PoolEnableAutoScaleParameter', + 'PoolEvaluateAutoScaleParameter', + 
'PoolResizeParameter', + 'PoolUpdatePropertiesParameter', + 'PoolPatchParameter', + 'TaskUpdateParameter', + 'NodeUpdateUserParameter', + 'NodeRebootParameter', + 'NodeReimageParameter', + 'NodeDisableSchedulingParameter', + 'NodeRemoveParameter', + 'UploadBatchServiceLogsConfiguration', + 'UploadBatchServiceLogsResult', + 'NodeCounts', + 'PoolNodeCounts', + 'ApplicationListOptions', + 'ApplicationGetOptions', + 'PoolListUsageMetricsOptions', + 'PoolGetAllLifetimeStatisticsOptions', + 'PoolAddOptions', + 'PoolListOptions', + 'PoolDeleteOptions', + 'PoolExistsOptions', + 'PoolGetOptions', + 'PoolPatchOptions', + 'PoolDisableAutoScaleOptions', + 'PoolEnableAutoScaleOptions', + 'PoolEvaluateAutoScaleOptions', + 'PoolResizeOptions', + 'PoolStopResizeOptions', + 'PoolUpdatePropertiesOptions', + 'PoolRemoveNodesOptions', + 'AccountListNodeAgentSkusOptions', + 'AccountListPoolNodeCountsOptions', + 'JobGetAllLifetimeStatisticsOptions', + 'JobDeleteOptions', + 'JobGetOptions', + 'JobPatchOptions', + 'JobUpdateOptions', + 'JobDisableOptions', + 'JobEnableOptions', + 'JobTerminateOptions', + 'JobAddOptions', + 'JobListOptions', + 'JobListFromJobScheduleOptions', + 'JobListPreparationAndReleaseTaskStatusOptions', + 'JobGetTaskCountsOptions', + 'CertificateAddOptions', + 'CertificateListOptions', + 'CertificateCancelDeletionOptions', + 'CertificateDeleteOptions', + 'CertificateGetOptions', + 'FileDeleteFromTaskOptions', + 'FileGetFromTaskOptions', + 'FileGetPropertiesFromTaskOptions', + 'FileDeleteFromComputeNodeOptions', + 'FileGetFromComputeNodeOptions', + 'FileGetPropertiesFromComputeNodeOptions', + 'FileListFromTaskOptions', + 'FileListFromComputeNodeOptions', + 'JobScheduleExistsOptions', + 'JobScheduleDeleteOptions', + 'JobScheduleGetOptions', + 'JobSchedulePatchOptions', + 'JobScheduleUpdateOptions', + 'JobScheduleDisableOptions', + 'JobScheduleEnableOptions', + 'JobScheduleTerminateOptions', + 'JobScheduleAddOptions', + 'JobScheduleListOptions', + 'TaskAddOptions', + 
'TaskListOptions', + 'TaskAddCollectionOptions', + 'TaskDeleteOptions', + 'TaskGetOptions', + 'TaskUpdateOptions', + 'TaskListSubtasksOptions', + 'TaskTerminateOptions', + 'TaskReactivateOptions', + 'ComputeNodeAddUserOptions', + 'ComputeNodeDeleteUserOptions', + 'ComputeNodeUpdateUserOptions', + 'ComputeNodeGetOptions', + 'ComputeNodeRebootOptions', + 'ComputeNodeReimageOptions', + 'ComputeNodeDisableSchedulingOptions', + 'ComputeNodeEnableSchedulingOptions', + 'ComputeNodeGetRemoteLoginSettingsOptions', + 'ComputeNodeGetRemoteDesktopOptions', + 'ComputeNodeUploadBatchServiceLogsOptions', + 'ComputeNodeListOptions', + 'ApplicationSummaryPaged', + 'PoolUsageMetricsPaged', + 'CloudPoolPaged', + 'NodeAgentSkuPaged', + 'PoolNodeCountsPaged', + 'CloudJobPaged', + 'JobPreparationAndReleaseTaskExecutionInformationPaged', + 'CertificatePaged', + 'NodeFilePaged', + 'CloudJobSchedulePaged', + 'CloudTaskPaged', + 'ComputeNodePaged', + 'OSType', + 'AccessScope', + 'CertificateState', + 'CertificateFormat', + 'JobAction', + 'DependencyAction', + 'AutoUserScope', + 'ElevationLevel', + 'LoginMode', + 'OutputFileUploadCondition', + 'ComputeNodeFillType', + 'CertificateStoreLocation', + 'CertificateVisibility', + 'CachingType', + 'StorageAccountType', + 'DynamicVNetAssignmentScope', + 'InboundEndpointProtocol', + 'NetworkSecurityGroupRuleAccess', + 'PoolLifetimeOption', + 'OnAllTasksComplete', + 'OnTaskFailure', + 'JobScheduleState', + 'ErrorCategory', + 'JobState', + 'JobPreparationTaskState', + 'TaskExecutionResult', + 'JobReleaseTaskState', + 'PoolState', + 'AllocationState', + 'TaskState', + 'TaskAddStatus', + 'SubtaskState', + 'StartTaskState', + 'ComputeNodeState', + 'SchedulingState', + 'DisableJobOption', + 'ComputeNodeDeallocationOption', + 'ComputeNodeRebootOption', + 'ComputeNodeReimageOption', + 'DisableComputeNodeSchedulingOption', +] diff --git a/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options.py 
b/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options.py new file mode 100644 index 00000000..6cc1c050 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListNodeAgentSkusOptions(Model): + """Additional parameters for list_node_agent_skus operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-node-agent-skus. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListNodeAgentSkusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options_py3.py new file mode 100644 index 00000000..01d06fb1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/account_list_node_agent_skus_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListNodeAgentSkusOptions(Model): + """Additional parameters for list_node_agent_skus operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-node-agent-skus. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListNodeAgentSkusOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options.py b/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options.py new file mode 100644 index 00000000..4ad2da01 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 10) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options_py3.py new file mode 100644 index 00000000..e9f0d02b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/account_list_pool_node_counts_options_py3.py 
@@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. + :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/affinity_information.py b/azext/generated/sdk/batch/v2018_12_01/models/affinity_information.py new file mode 100644 index 00000000..206608f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/affinity_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a compute + node on which to start a task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a compute node or a task that has run previously. 
You can pass the + affinityId of a compute node to indicate that this task needs to run on + that compute node. Note that this is just a soft affinity. If the target + node is busy or unavailable at the time the task is scheduled, then the + task will be scheduled elsewhere. + :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/affinity_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/affinity_information_py3.py new file mode 100644 index 00000000..fcf8d04b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/affinity_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a compute + node on which to start a task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a compute node or a task that has run previously. You can pass the + affinityId of a compute node to indicate that this task needs to run on + that compute node. Note that this is just a soft affinity. 
If the target + node is busy or unavailable at the time the task is scheduled, then the + task will be scheduled elsewhere. + :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str, **kwargs) -> None: + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/application_get_options.py new file mode 100644 index 00000000..038c5421 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_get_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/application_get_options_py3.py new file mode 100644 index 00000000..3c9d5c0a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_get_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/application_list_options.py new file mode 100644 index 00000000..bc3ddb36 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_list_options.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. 
+ + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/application_list_options_py3.py new file mode 100644 index 00000000..445de51e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_list_options_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. + + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference.py b/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference.py new file mode 100644 index 00000000..08673617 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an application package to be deployed to compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. If omitted, the + default version is deployed. 
If this is omitted on a pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a task, and no default version is specified for this + application, the task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.version = kwargs.get('version', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference_py3.py new file mode 100644 index 00000000..dd81226b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_package_reference_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an application package to be deployed to compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. 
If omitted, the + default version is deployed. If this is omitted on a pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a task, and no default version is specified for this + application, the task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None: + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = application_id + self.version = version diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_summary.py b/azext/generated/sdk/batch/v2018_12_01/models/application_summary.py new file mode 100644 index 00000000..4a65a11f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_summary.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the account. + :type id: str + :param display_name: Required. 
The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. + :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(ApplicationSummary, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.versions = kwargs.get('versions', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_summary_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/application_summary_paged.py new file mode 100644 index 00000000..64ed9c6b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_summary_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ApplicationSummaryPaged(Paged): + """ + A paging container for iterating over a list of :class:`ApplicationSummary ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ApplicationSummary]'} + } + + def __init__(self, *args, **kwargs): + + super(ApplicationSummaryPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/application_summary_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/application_summary_py3.py new file mode 100644 index 00000000..68c838e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/application_summary_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the account. + :type id: str + :param display_name: Required. The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. 
+ :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None: + super(ApplicationSummary, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.versions = versions diff --git a/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings.py b/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings.py new file mode 100644 index 00000000..fd05e87b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the job which + contains the task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, **kwargs): + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = kwargs.get('access', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings_py3.py new file mode 100644 index 00000000..23dde60c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/authentication_token_settings_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the job which + contains the task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, *, access=None, **kwargs) -> None: + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = access diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification.py new file mode 100644 index 00000000..7383a65a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto pool when the job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a pool is automatically created. The Batch service assigns each auto + pool a unique identifier on creation. To distinguish between pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto pools, and how multiple jobs on a schedule are assigned to pools. 
+ Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto pool alive after its lifetime + expires. If false, the Batch service deletes the pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the job or job schedule completes. If true, the Batch service does not + delete the pool automatically. It is up to the user to delete auto pools + created with this option. + :type keep_alive: bool + :param pool: The pool specification for the auto pool. + :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, **kwargs): + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None) + self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None) + self.keep_alive = kwargs.get('keep_alive', None) + self.pool = kwargs.get('pool', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification_py3.py new file mode 100644 index 00000000..4b07e831 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_pool_specification_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto pool when the job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a pool is automatically created. The Batch service assigns each auto + pool a unique identifier on creation. To distinguish between pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto pools, and how multiple jobs on a schedule are assigned to pools. + Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto pool alive after its lifetime + expires. If false, the Batch service deletes the pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the job or job schedule completes. If true, the Batch service does not + delete the pool automatically. It is up to the user to delete auto pools + created with this option. + :type keep_alive: bool + :param pool: The pool specification for the auto pool. 
+ :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None: + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = auto_pool_id_prefix + self.pool_lifetime_option = pool_lifetime_option + self.keep_alive = keep_alive + self.pool = pool diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run.py new file mode 100644 index 00000000..06b0d4fe --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. 
Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. + :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = kwargs.get('timestamp', None) + self.results = kwargs.get('results', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error.py new file mode 100644 index 00000000..d0d7b163 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error_py3.py new file mode 100644 index 00000000..8ffa9805 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_error_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. 
+ :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_py3.py new file mode 100644 index 00000000..9f58e936 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_scale_run_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. 
+ :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None: + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = timestamp + self.results = results + self.error = error diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification.py new file mode 100644 index 00000000..60127c74 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a task on the Batch + service. + + :param scope: The scope for the auto user. The default value is task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, **kwargs): + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = kwargs.get('scope', None) + self.elevation_level = kwargs.get('elevation_level', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification_py3.py new file mode 100644 index 00000000..bc590d2c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/auto_user_specification_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a task on the Batch + service. + + :param scope: The scope for the auto user. The default value is task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = scope + self.elevation_level = elevation_level diff --git a/azext/generated/sdk/batch/v2018_12_01/models/batch_error.py b/azext/generated/sdk/batch/v2018_12_01/models/batch_error.py new file mode 100644 index 00000000..3857ac96 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/batch_error.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, **kwargs): + super(BatchError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail.py b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail.py new file mode 100644 index 00000000..a892678c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail_py3.py new file mode 100644 index 00000000..8aa8a85b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_detail_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/batch_error_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_py3.py new file mode 100644 index 00000000..a6e49569 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/batch_error_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None: + super(BatchError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/batch_service_client_enums.py b/azext/generated/sdk/batch/v2018_12_01/models/batch_service_client_enums.py new file mode 100644 index 00000000..83ee302f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/batch_service_client_enums.py @@ -0,0 +1,288 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class OSType(str, Enum): + + linux = "linux" #: The Linux operating system. + windows = "windows" #: The Windows operating system. + + +class AccessScope(str, Enum): + + job = "job" #: Grants access to perform all operations on the job containing the task. 
# ---------------------------------------------------------------------------
# Enumerations for the Batch 2018-12-01 generated models.
#
# Every member value below is the exact wire-format string exchanged with the
# Batch REST API, so the values (and the member iteration order) must never be
# changed — only the surrounding documentation may evolve.
# ---------------------------------------------------------------------------


class CertificateState(str, Enum):
    """Lifecycle state of a certificate resource."""

    #: The certificate is available for use in pools.
    active = "active"
    #: Deletion was requested but has not finished; the certificate may no
    #: longer be referenced when creating or updating pools.
    deleting = "deleting"
    #: Deletion failed because pools or nodes still reference the certificate
    #: (nodes refresh certificates only on restart). Cancel the delete or
    #: retry it once references are gone.
    delete_failed = "deletefailed"


class CertificateFormat(str, Enum):
    """Encoding of uploaded certificate data."""

    #: A PFX (PKCS#12) formatted certificate or certificate chain.
    pfx = "pfx"
    #: A base64-encoded X.509 certificate.
    cer = "cer"


class JobAction(str, Enum):
    """Action the service takes on the job for a task exit condition."""

    #: Take no action.
    none = "none"
    #: Disable the job (equivalent to the disable-job API with
    #: disableTasks=requeue).
    disable = "disable"
    #: Terminate the job; executionInfo.terminateReason is set to
    #: "TaskFailed".
    terminate = "terminate"


class DependencyAction(str, Enum):
    """How a task exit condition affects dependent tasks."""

    #: Satisfy the task's dependencies.
    satisfy = "satisfy"
    #: Block the task's dependencies.
    block = "block"


class AutoUserScope(str, Enum):
    """Scope of the auto user account a task runs under."""

    #: The service creates a new user dedicated to the task.
    task = "task"
    #: The task runs as the common auto user account created on every node.
    pool = "pool"


class ElevationLevel(str, Enum):
    """Privilege level of a user identity."""

    #: A standard user without elevated access.
    non_admin = "nonadmin"
    #: A user with elevated access and full Administrator permissions.
    admin = "admin"


class LoginMode(str, Enum):
    """Win32 login mode for the user."""

    #: LOGON32_LOGON_BATCH; recommended for long-running parallel processes.
    batch = "batch"
    #: LOGON32_LOGON_INTERACTIVE. UAC is enabled on Windows
    #: VirtualMachineConfiguration pools, so an elevated identity is not
    #: actually elevated unless the executed application requires it.
    interactive = "interactive"


class OutputFileUploadCondition(str, Enum):
    """When task output file(s) should be uploaded."""

    #: Upload only after the task process exits with code 0.
    task_success = "tasksuccess"
    #: Upload only after the task process exits with a nonzero code.
    task_failure = "taskfailure"
    #: Upload after the task process exits, whatever the exit code.
    task_completion = "taskcompletion"


class ComputeNodeFillType(str, Enum):
    """How tasks are distributed across nodes in a pool."""

    #: Assign tasks evenly across all nodes.
    spread = "spread"
    #: Fill each node up to maxTasksPerNode before moving to the next.
    pack = "pack"


class CertificateStoreLocation(str, Enum):
    """Windows certificate store a certificate is installed to."""

    #: Install to the CurrentUser certificate store.
    current_user = "currentuser"
    #: Install to the LocalMachine certificate store.
    local_machine = "localmachine"


class CertificateVisibility(str, Enum):
    """Which user accounts on a node can see a certificate."""

    #: Visible to the account running the start task.
    start_task = "starttask"
    #: Visible to the accounts running job tasks.
    task = "task"
    #: Visible to the accounts used for remote access to the node.
    remote_user = "remoteuser"


class CachingType(str, Enum):
    """Disk caching mode."""

    #: Caching is not enabled for the disk.
    none = "none"
    #: Read-only caching.
    read_only = "readonly"
    #: Read/write caching.
    read_write = "readwrite"


class StorageAccountType(str, Enum):
    """Storage redundancy for a data disk."""

    #: Standard locally redundant storage.
    standard_lrs = "standard_lrs"
    #: Premium locally redundant storage.
    premium_lrs = "premium_lrs"


class DynamicVNetAssignmentScope(str, Enum):
    """Scope of dynamic VNet assignment."""

    #: No dynamic VNet assignment.
    none = "none"
    #: Dynamic VNet assignment is done per-job.
    job = "job"


class InboundEndpointProtocol(str, Enum):
    """Transport protocol for an inbound endpoint."""

    #: Use TCP.
    tcp = "tcp"
    #: Use UDP.
    udp = "udp"


class NetworkSecurityGroupRuleAccess(str, Enum):
    """Whether an NSG rule allows or denies traffic."""

    #: Allow access.
    allow = "allow"
    #: Deny access.
    deny = "deny"


class PoolLifetimeOption(str, Enum):
    """Lifetime of an auto pool."""

    #: The pool lives as long as the job schedule; only valid for job
    #: schedules, not plain jobs.
    job_schedule = "jobschedule"
    #: The pool lives as long as its dedicated job; on a job schedule a new
    #: auto pool is created for every job.
    job = "job"


class OnAllTasksComplete(str, Enum):
    """Service action when all tasks of a job are complete."""

    #: Do nothing; the job stays active until terminated or disabled.
    no_action = "noaction"
    #: Terminate the job; terminateReason becomes 'AllTasksComplete'.
    terminate_job = "terminatejob"


class OnTaskFailure(str, Enum):
    """Service action when any task in a job fails."""

    #: Do nothing; the job stays active until terminated or disabled.
    no_action = "noaction"
    #: Apply the action from the task's exitConditions collection (which may
    #: itself specify no action).
    perform_exit_options_job_action = "performexitoptionsjobaction"


class JobScheduleState(str, Enum):
    """Lifecycle state of a job schedule."""

    #: Active; jobs are created per the schedule.
    active = "active"
    #: Terminated, by reaching its end time or explicit user action.
    completed = "completed"
    #: Disabled by the user; no new jobs start, existing active jobs run on.
    disabled = "disabled"
    #: Termination in progress; no new jobs, no active existing job.
    terminating = "terminating"
    #: Deletion in progress; all jobs/tasks under the schedule are deleted
    #: first, then the schedule itself.
    deleting = "deleting"


class ErrorCategory(str, Enum):
    """Whether an error is attributed to the user or the service."""

    #: A user issue, such as misconfiguration.
    user_error = "usererror"
    #: An internal server issue.
    server_error = "servererror"


class JobState(str, Enum):
    """Lifecycle state of a job."""

    #: Available to have tasks scheduled.
    active = "active"
    #: Disable requested; the operation is still in progress.
    disabling = "disabling"
    #: Disabled; nothing runs and no new tasks are scheduled.
    disabled = "disabled"
    #: Enable requested; the operation is still in progress.
    enabling = "enabling"
    #: About to complete; the terminate operation is still in progress
    #: (for example, Job Release tasks are running).
    terminating = "terminating"
    #: All tasks finished; no more tasks or changes are accepted.
    completed = "completed"
    #: Delete requested; the operation is still in progress.
    deleting = "deleting"


class JobPreparationTaskState(str, Enum):
    """State of a Job Preparation task on a node."""

    #: Currently running (including retries).
    running = "running"
    #: Exited with code 0, exhausted its retries, or failed to start due to
    #: preparation errors such as resource file download failures.
    completed = "completed"


class TaskExecutionResult(str, Enum):
    """Outcome of a task execution."""

    #: The task ran successfully.
    success = "success"
    #: An error occurred before launch, during execution, or after exit.
    failure = "failure"


class JobReleaseTaskState(str, Enum):
    """State of a Job Release task on a node."""

    #: Currently running (including retries).
    running = "running"
    #: Exited with code 0, exhausted its retries, or failed to start due to
    #: preparation errors such as resource file download failures.
    completed = "completed"


class PoolState(str, Enum):
    """Lifecycle state of a pool."""

    #: Available to run tasks, subject to node availability.
    active = "active"
    #: Delete requested but not yet completed.
    deleting = "deleting"


class AllocationState(str, Enum):
    """Whether a pool is resizing."""

    #: Not resizing; no node-count change in progress.
    steady = "steady"
    #: Nodes are being added to or removed from the pool.
    resizing = "resizing"
    #: A stop-resize was requested and has not yet completed.
    stopping = "stopping"


class TaskState(str, Enum):
    """Lifecycle state of a task."""

    #: Queued and able to run, not assigned to a node (newly created,
    #: re-enabled, or awaiting a retry).
    active = "active"
    #: Assigned to a node, waiting for the Job Preparation task; on prep
    #: failure the task returns to active and may go to another node.
    preparing = "preparing"
    #: Running on a node, including task-level preparation such as resource
    #: downloads — the command line may not have started yet.
    running = "running"
    #: No longer eligible to run: finished (successfully or after exhausting
    #: retries), failed to launch, or was terminated.
    completed = "completed"


class TaskAddStatus(str, Enum):
    """Result of adding a single task in a collection."""

    #: Added successfully.
    success = "success"
    #: Failed due to a client error; do not retry without changing the
    #: request.
    client_error = "clienterror"
    #: Failed due to a server error; may be retried unmodified.
    server_error = "servererror"


class SubtaskState(str, Enum):
    """Lifecycle state of a multi-instance subtask."""

    #: Assigned to a node, waiting for the Job Preparation task; on prep
    #: failure the task returns to active and may go to another node.
    preparing = "preparing"
    #: Running on a node, including task-level preparation — the command
    #: line may not have started yet.
    running = "running"
    #: No longer eligible to run: finished, failed to launch, or terminated.
    completed = "completed"


class StartTaskState(str, Enum):
    """State of the pool start task on a node."""

    #: Currently running.
    running = "running"
    #: Exited with code 0, failed past its retry limit, or never ran due to
    #: preparation errors such as resource file download failures.
    completed = "completed"


class ComputeNodeState(str, Enum):
    """Lifecycle state of a compute node."""

    #: Not currently running a task.
    idle = "idle"
    #: Rebooting.
    rebooting = "rebooting"
    #: Reimaging.
    reimaging = "reimaging"
    #: Running one or more tasks (other than a start task).
    running = "running"
    #: Unusable for task execution due to errors.
    unusable = "unusable"
    #: VM obtained from Azure Compute but not yet joining the pool.
    creating = "creating"
    #: The Batch service is starting on the underlying VM.
    starting = "starting"
    #: Start task started; waitForSuccess is set and it has not completed.
    waiting_for_start_task = "waitingforstarttask"
    #: Start task failed past all retries with waitForSuccess set; the node
    #: cannot run tasks.
    start_task_failed = "starttaskfailed"
    #: The service lost contact with the node; true state unknown.
    unknown = "unknown"
    #: Leaving the pool (explicit removal, resize, or autoscale down).
    leaving_pool = "leavingpool"
    #: Idle with task scheduling disabled.
    offline = "offline"
    #: Low-priority node was preempted; its tasks are rescheduled when
    #: another node becomes available.
    preempted = "preempted"


class SchedulingState(str, Enum):
    """Whether new tasks may be scheduled on a node."""

    #: Tasks can be scheduled (the initial state of every node).
    enabled = "enabled"
    #: No new tasks; already-running tasks may complete.
    disabled = "disabled"


class DisableJobOption(str, Enum):
    """What to do with running tasks when disabling a job."""

    #: Terminate and requeue; tasks rerun when the job is enabled.
    requeue = "requeue"
    #: Terminate; tasks complete with failureInfo and do not rerun.
    terminate = "terminate"
    #: Let running tasks finish.
    wait = "wait"


class ComputeNodeDeallocationOption(str, Enum):
    """What to do with running tasks when removing nodes from a pool."""

    #: Terminate and requeue tasks; remove nodes once tasks are terminated.
    requeue = "requeue"
    #: Terminate tasks (failureInfo set, no rerun); remove nodes once tasks
    #: are terminated.
    terminate = "terminate"
    #: Let tasks finish, schedule nothing new; remove nodes when all tasks
    #: have completed.
    task_completion = "taskcompletion"
    #: Additionally wait for all task data retention periods to expire
    #: before removing nodes.
    retained_data = "retaineddata"


class ComputeNodeRebootOption(str, Enum):
    """What to do with running tasks when rebooting a node."""

    #: Terminate and requeue tasks; restart the node once tasks terminate.
    requeue = "requeue"
    #: Terminate tasks (failureInfo set, no rerun); restart the node once
    #: tasks terminate.
    terminate = "terminate"
    #: Let tasks finish, schedule nothing new; restart when all complete.
    task_completion = "taskcompletion"
    #: Additionally wait for all task data retention periods to expire
    #: before restarting.
    retained_data = "retaineddata"


class ComputeNodeReimageOption(str, Enum):
    """What to do with running tasks when reimaging a node."""

    #: Terminate and requeue tasks; reimage the node once tasks terminate.
    requeue = "requeue"
    #: Terminate tasks (failureInfo set, no rerun); reimage the node once
    #: tasks terminate.
    terminate = "terminate"
    #: Let tasks finish, schedule nothing new; reimage when all complete.
    task_completion = "taskcompletion"
    #: Additionally wait for all task data retention periods to expire
    #: before reimaging.
    retained_data = "retaineddata"


class DisableComputeNodeSchedulingOption(str, Enum):
    """What to do with running tasks when disabling scheduling on a node."""

    #: Terminate and requeue tasks (they may rerun elsewhere or when
    #: scheduling is re-enabled); go offline once tasks terminate.
    requeue = "requeue"
    #: Terminate tasks (failureInfo set, no rerun); go offline once tasks
    #: terminate.
    terminate = "terminate"
    #: Let tasks finish, schedule nothing new; go offline when all complete.
    task_completion = "taskcompletion"

# ---------------------------------------------------------------------------
# (patch continues: new generated file
#  azext/generated/sdk/batch/v2018_12_01/models/certificate.py —
#  standard AutoRest header: coding=utf-8, MIT license, "changes may cause
#  incorrect behavior and will be lost if the code is regenerated")
# ---------------------------------------------------------------------------
# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate.py
# New AutoRest-generated file added by this patch (MIT licensed; hand edits
# are lost on regeneration).
# ===========================================================================

from msrest.serialization import Model


class Certificate(Model):
    """A certificate that can be installed on compute nodes and used to
    authenticate operations on the machine.

    :param thumbprint: The X.509 thumbprint of the certificate — a sequence
     of up to 40 hex digits.
    :type thumbprint: str
    :param thumbprint_algorithm: The algorithm used to derive the thumbprint.
    :type thumbprint_algorithm: str
    :param url: The URL of the certificate.
    :type url: str
    :param state: The current state of the certificate. Possible values
     include: 'active', 'deleting', 'deleteFailed'
    :type state: str or ~azure.batch.models.CertificateState
    :param state_transition_time: When the certificate entered its current
     state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the certificate; unset while
     the certificate is in its initial active state. Possible values include:
     'active', 'deleting', 'deleteFailed'
    :type previous_state: str or ~azure.batch.models.CertificateState
    :param previous_state_transition_time: When the certificate entered its
     previous state; unset while in its initial Active state.
    :type previous_state_transition_time: datetime
    :param public_data: The public part of the certificate as a base-64
     encoded .cer file.
    :type public_data: str
    :param delete_certificate_error: The error from the last delete attempt;
     set only in the DeleteFailed state.
    :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError
    """

    # Wire-format keys and serialization types; must match the REST contract.
    _attribute_map = {
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'state': {'key': 'state', 'type': 'CertificateState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'CertificateState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'public_data': {'key': 'publicData', 'type': 'str'},
        'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'},
    }

    def __init__(self, **kwargs):
        super(Certificate, self).__init__(**kwargs)
        # Every attribute is optional; dict.get defaults to None.
        self.thumbprint = kwargs.get('thumbprint')
        self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm')
        self.url = kwargs.get('url')
        self.state = kwargs.get('state')
        self.state_transition_time = kwargs.get('state_transition_time')
        self.previous_state = kwargs.get('previous_state')
        self.previous_state_transition_time = kwargs.get('previous_state_transition_time')
        self.public_data = kwargs.get('public_data')
        self.delete_certificate_error = kwargs.get('delete_certificate_error')

# ---------------------------------------------------------------------------
# (patch continues: new generated file
#  azext/generated/sdk/batch/v2018_12_01/models/certificate_add_options.py —
#  standard AutoRest header begins here and completes in the next hunk)
# ---------------------------------------------------------------------------
# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate_add_options.py
# New AutoRest-generated file (MIT licensed; hand edits lost on regeneration).
# ===========================================================================

from msrest.serialization import Model


class CertificateAddOptions(Model):
    """Additional parameters for add operation.

    :param timeout: The maximum time the server can spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Header-style parameters: empty 'key' means no body serialization.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(CertificateAddOptions, self).__init__(**kwargs)
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')

# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate_add_options_py3.py
# Python-3-only variant of the class above: identical contract, but the
# constructor takes keyword-only annotated parameters instead of **kwargs.
# NOTE(review): merged here, this second definition rebinds the module name
# CertificateAddOptions — in the original patch they live in separate files.
# ===========================================================================

from msrest.serialization import Model  # noqa: F811 (per-file import in the patch)


class CertificateAddOptions(Model):  # noqa: F811
    """Additional parameters for add operation.

    :param timeout: The maximum time the server can spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Header-style parameters: empty 'key' means no body serialization.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(CertificateAddOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date

# ---------------------------------------------------------------------------
# (patch continues: new generated file
#  azext/generated/sdk/batch/v2018_12_01/models/certificate_add_parameter.py —
#  standard AutoRest header begins here and completes in the next hunk)
# ---------------------------------------------------------------------------
# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate_add_parameter.py
# New AutoRest-generated file (MIT licensed; hand edits lost on regeneration).
# ===========================================================================

from msrest.serialization import Model


class CertificateAddParameter(Model):
    """A certificate that can be installed on compute nodes and used to
    authenticate operations on the machine.

    All required parameters must be populated in order to send to Azure.

    :param thumbprint: Required. The X.509 thumbprint of the certificate — a
     sequence of up to 40 hex digits (spaces are allowed but removed).
    :type thumbprint: str
    :param thumbprint_algorithm: Required. The algorithm used to derive the
     thumbprint. This must be sha1.
    :type thumbprint_algorithm: str
    :param data: Required. The base64-encoded contents of the certificate.
     The maximum size is 10KB.
    :type data: str
    :param certificate_format: The format of the certificate data. Possible
     values include: 'pfx', 'cer'
    :type certificate_format: str or ~azure.batch.models.CertificateFormat
    :param password: The password for the certificate's private key; required
     for pfx format, omitted for cer.
    :type password: str
    """

    _validation = {
        'thumbprint': {'required': True},
        'thumbprint_algorithm': {'required': True},
        'data': {'required': True},
    }

    # Wire-format keys and serialization types; must match the REST contract.
    _attribute_map = {
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
        'data': {'key': 'data', 'type': 'str'},
        'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'},
        'password': {'key': 'password', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CertificateAddParameter, self).__init__(**kwargs)
        self.thumbprint = kwargs.get('thumbprint')
        self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm')
        self.data = kwargs.get('data')
        self.certificate_format = kwargs.get('certificate_format')
        self.password = kwargs.get('password')

# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate_add_parameter_py3.py
# Python-3-only variant: identical contract, keyword-only annotated
# constructor. NOTE(review): merged here, this second definition rebinds the
# name CertificateAddParameter — separate files in the original patch.
# ===========================================================================

from msrest.serialization import Model  # noqa: F811 (per-file import in the patch)


class CertificateAddParameter(Model):  # noqa: F811
    """A certificate that can be installed on compute nodes and used to
    authenticate operations on the machine.

    All required parameters must be populated in order to send to Azure.

    :param thumbprint: Required. The X.509 thumbprint of the certificate — a
     sequence of up to 40 hex digits (spaces are allowed but removed).
    :type thumbprint: str
    :param thumbprint_algorithm: Required. The algorithm used to derive the
     thumbprint. This must be sha1.
    :type thumbprint_algorithm: str
    :param data: Required. The base64-encoded contents of the certificate.
     The maximum size is 10KB.
    :type data: str
    :param certificate_format: The format of the certificate data. Possible
     values include: 'pfx', 'cer'
    :type certificate_format: str or ~azure.batch.models.CertificateFormat
    :param password: The password for the certificate's private key; required
     for pfx format, omitted for cer.
    :type password: str
    """

    _validation = {
        'thumbprint': {'required': True},
        'thumbprint_algorithm': {'required': True},
        'data': {'required': True},
    }

    # Wire-format keys and serialization types; must match the REST contract.
    _attribute_map = {
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
        'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'},
        'data': {'key': 'data', 'type': 'str'},
        'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'},
        'password': {'key': 'password', 'type': 'str'},
    }

    def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None:
        super(CertificateAddParameter, self).__init__(**kwargs)
        self.thumbprint = thumbprint
        self.thumbprint_algorithm = thumbprint_algorithm
        self.data = data
        self.certificate_format = certificate_format
        self.password = password

# ---------------------------------------------------------------------------
# (patch continues: new generated file azext/generated/sdk/batch/v2018_12_01/
#  models/certificate_cancel_deletion_options.py, hunk @@ -0,0 +1,46 @@)
# ---------------------------------------------------------------------------
# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/
#   certificate_cancel_deletion_options.py
# New AutoRest-generated file (MIT licensed; hand edits lost on regeneration).
# ===========================================================================

from msrest.serialization import Model


class CertificateCancelDeletionOptions(Model):
    """Additional parameters for cancel_deletion operation.

    :param timeout: The maximum time the server can spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Header-style parameters: empty 'key' means no body serialization.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(CertificateCancelDeletionOptions, self).__init__(**kwargs)
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')

# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/
#   certificate_cancel_deletion_options_py3.py
# Python-3-only variant: identical contract, keyword-only annotated
# constructor. NOTE(review): merged here, this second definition rebinds the
# name CertificateCancelDeletionOptions — separate files in the patch.
# ===========================================================================

from msrest.serialization import Model  # noqa: F811 (per-file import in the patch)


class CertificateCancelDeletionOptions(Model):  # noqa: F811
    """Additional parameters for cancel_deletion operation.

    :param timeout: The maximum time the server can spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Header-style parameters: empty 'key' means no body serialization.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(CertificateCancelDeletionOptions, self).__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date

# ===========================================================================
# azext/generated/sdk/batch/v2018_12_01/models/certificate_delete_options.py
# New AutoRest-generated file (MIT licensed; hand edits lost on regeneration).
# ===========================================================================

from msrest.serialization import Model  # noqa: F811 (per-file import in the patch)


class CertificateDeleteOptions(Model):
    """Additional parameters for delete operation.

    :param timeout: The maximum time the server can spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: The caller-generated request identity, a GUID
     with no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Header-style parameters: empty 'key' means no body serialization.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(CertificateDeleteOptions, self).__init__(**kwargs)
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')

# ---------------------------------------------------------------------------
# (patch continues: new generated file azext/generated/sdk/batch/v2018_12_01/
#  models/certificate_delete_options_py3.py — the hunk is truncated at this
#  chunk boundary partway through its AutoRest file header)
# ---------------------------------------------------------------------------
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options.py new file mode 100644 index 00000000..2b474c17 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options_py3.py new file mode 100644 index 00000000..4bd6bb70 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. 
+ + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options.py new file mode 100644 index 00000000..cb3134af --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options_py3.py new file mode 100644 index 00000000..461b8044 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_paged.py 
b/azext/generated/sdk/batch/v2018_12_01/models/certificate_paged.py new file mode 100644 index 00000000..985d7838 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CertificatePaged(Paged): + """ + A paging container for iterating over a list of :class:`Certificate ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Certificate]'} + } + + def __init__(self, *args, **kwargs): + + super(CertificatePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_py3.py new file mode 100644 index 00000000..4e0b71c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A certificate that can be installed on compute nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the certificate. + :type url: str + :param state: The current state of the certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the certificate. This + property is not set if the certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the certificate + entered its previous state. This property is not set if the certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this certificate. This property is set only if the + certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None: + super(Certificate, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.url = url + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.public_data = public_data + self.delete_certificate_error = delete_certificate_error diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference.py new file mode 100644 index 00000000..976c1908 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentuser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only + for pools configured with Windows nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user accounts on the compute node should have + access to the private data of the certificate. You can specify more than + one visibility in this collection. The default is all accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, **kwargs): + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.store_location = kwargs.get('store_location', None) + self.store_name = kwargs.get('store_name', None) + self.visibility = kwargs.get('visibility', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference_py3.py new file mode 100644 index 00000000..46e52e27 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/certificate_reference_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentuser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only + for pools configured with Windows nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user accounts on the compute node should have + access to the private data of the certificate. You can specify more than + one visibility in this collection. The default is all accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.store_location = store_location + self.store_name = store_name + self.visibility = visibility diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_job.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_job.py new file mode 100644 index 00000000..4075515a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_job.py @@ -0,0 +1,163 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudJob(Model):
    """An Azure Batch job.

    Every property is optional and defaults to None; attribute names map to
    the Batch REST API wire names through ``_attribute_map``.

    :param id: Uniquely identifies the job within the account. The ID is
        case-preserving and case-insensitive.
    :type id: str
    :param display_name: The display name for the job.
    :type display_name: str
    :param uses_task_dependencies: Whether tasks in the job can define
        dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    :param url: The URL of the job.
    :type url: str
    :param e_tag: The ETag of the job, an opaque string; pass it when
        updating the job so the change only takes effect if nobody else has
        modified the job in the meantime.
    :type e_tag: str
    :param last_modified: The last time job-level data (state, priority,
        ...) changed; task-level changes are not reflected.
    :type last_modified: datetime
    :param creation_time: The creation time of the job.
    :type creation_time: datetime
    :param state: The current state of the job. Possible values include:
        'active', 'disabling', 'disabled', 'enabling', 'terminating',
        'completed', 'deleting'
    :type state: str or ~azure.batch.models.JobState
    :param state_transition_time: When the job entered its current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the job; not set while the
        job is in its initial Active state. Possible values include:
        'active', 'disabling', 'disabled', 'enabling', 'terminating',
        'completed', 'deleting'
    :type previous_state: str or ~azure.batch.models.JobState
    :param previous_state_transition_time: When the job entered its previous
        state; not set while the job is in its initial Active state.
    :type previous_state_transition_time: datetime
    :param priority: The priority of the job, from -1000 (lowest) to 1000
        (highest). The default value is 0.
    :type priority: int
    :param constraints: The execution constraints for the job.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: Details of a Job Manager task to be launched
        when the job is started.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: Special task run on each node before any
        other task of the job.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: Special task run at the end of the job on each
        node that has run any other task of the job.
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: Environment variables set for all
        tasks in the job (including Job Manager, Preparation and Release
        tasks); individual tasks may override a setting by name.
    :type common_environment_settings:
        list[~azure.batch.models.EnvironmentSetting]
    :param pool_info: The pool settings associated with the job.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param on_all_tasks_complete: Action the Batch service takes when all
        tasks in the job are completed. The default is noaction. Possible
        values include: 'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or
        ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: Action the Batch service takes when any task in
        the job fails (completes with a failureInfo). The default is
        noaction. Possible values include: 'noAction',
        'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param network_configuration: The network configuration for the job.
    :type network_configuration: ~azure.batch.models.JobNetworkConfiguration
    :param metadata: Name-value pairs associated with the job; the Batch
        service assigns no meaning to them.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param execution_info: The execution information for the job.
    :type execution_info: ~azure.batch.models.JobExecutionInformation
    :param stats: Lifetime resource usage statistics for the job; only
        populated when the CloudJob was retrieved with an expand clause
        including 'stats', and may lag by roughly 30 minutes.
    :type stats: ~azure.batch.models.JobStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'priority': {'key': 'priority', 'type': 'int'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'},
        'stats': {'key': 'stats', 'type': 'JobStatistics'},
    }

    def __init__(self, **kwargs):
        super(CloudJob, self).__init__(**kwargs)
        # Every property is optional: copy each recognised keyword argument
        # onto the instance, defaulting to None when absent.
        for name in ('id', 'display_name', 'uses_task_dependencies', 'url',
                     'e_tag', 'last_modified', 'creation_time', 'state',
                     'state_transition_time', 'previous_state',
                     'previous_state_transition_time', 'priority',
                     'constraints', 'job_manager_task',
                     'job_preparation_task', 'job_release_task',
                     'common_environment_settings', 'pool_info',
                     'on_all_tasks_complete', 'on_task_failure',
                     'network_configuration', 'metadata', 'execution_info',
                     'stats'):
            setattr(self, name, kwargs.get(name, None))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class CloudJobPaged(Paged):
    """A paging container for iterating over a list of :class:`CloudJob`
    objects.
    """

    _attribute_map = {
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[CloudJob]'}
    }

    # Paged.__init__ is inherited unchanged; no override is needed.
class CloudJob(Model):
    """An Azure Batch job (Python 3 keyword-only constructor variant).

    Every property is optional and defaults to None; attribute names map to
    the Batch REST API wire names through ``_attribute_map``.

    :param id: Uniquely identifies the job within the account. The ID is
        case-preserving and case-insensitive.
    :type id: str
    :param display_name: The display name for the job.
    :type display_name: str
    :param uses_task_dependencies: Whether tasks in the job can define
        dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    :param url: The URL of the job.
    :type url: str
    :param e_tag: The ETag of the job, an opaque string; pass it when
        updating the job so the change only takes effect if nobody else has
        modified the job in the meantime.
    :type e_tag: str
    :param last_modified: The last time job-level data (state, priority,
        ...) changed; task-level changes are not reflected.
    :type last_modified: datetime
    :param creation_time: The creation time of the job.
    :type creation_time: datetime
    :param state: The current state of the job. Possible values include:
        'active', 'disabling', 'disabled', 'enabling', 'terminating',
        'completed', 'deleting'
    :type state: str or ~azure.batch.models.JobState
    :param state_transition_time: When the job entered its current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the job; not set while the
        job is in its initial Active state. Possible values include:
        'active', 'disabling', 'disabled', 'enabling', 'terminating',
        'completed', 'deleting'
    :type previous_state: str or ~azure.batch.models.JobState
    :param previous_state_transition_time: When the job entered its previous
        state; not set while the job is in its initial Active state.
    :type previous_state_transition_time: datetime
    :param priority: The priority of the job, from -1000 (lowest) to 1000
        (highest). The default value is 0.
    :type priority: int
    :param constraints: The execution constraints for the job.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: Details of a Job Manager task to be launched
        when the job is started.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: Special task run on each node before any
        other task of the job.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: Special task run at the end of the job on each
        node that has run any other task of the job.
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: Environment variables set for all
        tasks in the job (including Job Manager, Preparation and Release
        tasks); individual tasks may override a setting by name.
    :type common_environment_settings:
        list[~azure.batch.models.EnvironmentSetting]
    :param pool_info: The pool settings associated with the job.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param on_all_tasks_complete: Action the Batch service takes when all
        tasks in the job are completed. The default is noaction. Possible
        values include: 'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or
        ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: Action the Batch service takes when any task in
        the job fails (completes with a failureInfo). The default is
        noaction. Possible values include: 'noAction',
        'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param network_configuration: The network configuration for the job.
    :type network_configuration: ~azure.batch.models.JobNetworkConfiguration
    :param metadata: Name-value pairs associated with the job; the Batch
        service assigns no meaning to them.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param execution_info: The execution information for the job.
    :type execution_info: ~azure.batch.models.JobExecutionInformation
    :param stats: Lifetime resource usage statistics for the job; only
        populated when the CloudJob was retrieved with an expand clause
        including 'stats', and may lag by roughly 30 minutes.
    :type stats: ~azure.batch.models.JobStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'priority': {'key': 'priority', 'type': 'int'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'},
        'stats': {'key': 'stats', 'type': 'JobStatistics'},
    }

    def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None:
        super(CloudJob, self).__init__(**kwargs)
        # Bind each keyword-only argument to the instance attribute of the
        # same name.
        for name, value in (
                ('id', id), ('display_name', display_name),
                ('uses_task_dependencies', uses_task_dependencies),
                ('url', url), ('e_tag', e_tag),
                ('last_modified', last_modified),
                ('creation_time', creation_time), ('state', state),
                ('state_transition_time', state_transition_time),
                ('previous_state', previous_state),
                ('previous_state_transition_time',
                 previous_state_transition_time),
                ('priority', priority), ('constraints', constraints),
                ('job_manager_task', job_manager_task),
                ('job_preparation_task', job_preparation_task),
                ('job_release_task', job_release_task),
                ('common_environment_settings', common_environment_settings),
                ('pool_info', pool_info),
                ('on_all_tasks_complete', on_all_tasks_complete),
                ('on_task_failure', on_task_failure),
                ('network_configuration', network_configuration),
                ('metadata', metadata), ('execution_info', execution_info),
                ('stats', stats)):
            setattr(self, name, value)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudJobSchedule(Model):
    """A job schedule that allows recurring jobs by specifying when to run
    jobs and a specification used to create each job.

    Every property is optional and defaults to None; attribute names map to
    the Batch REST API wire names through ``_attribute_map``.

    :param id: Uniquely identifies the schedule within the account.
    :type id: str
    :param display_name: The display name for the schedule.
    :type display_name: str
    :param url: The URL of the job schedule.
    :type url: str
    :param e_tag: The ETag of the job schedule, an opaque string; pass it
        with an Update Job Schedule request so the change only takes effect
        if nobody else has modified the schedule in the meantime.
    :type e_tag: str
    :param last_modified: The last time schedule-level data (job
        specification, recurrence, ...) changed; job-level changes are not
        reflected.
    :type last_modified: datetime
    :param creation_time: The creation time of the job schedule.
    :type creation_time: datetime
    :param state: The current state of the job schedule. Possible values
        include: 'active', 'completed', 'disabled', 'terminating',
        'deleting'
    :type state: str or ~azure.batch.models.JobScheduleState
    :param state_transition_time: When the job schedule entered its current
        state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the job schedule; not
        present while the schedule is in its initial active state. Possible
        values include: 'active', 'completed', 'disabled', 'terminating',
        'deleting'
    :type previous_state: str or ~azure.batch.models.JobScheduleState
    :param previous_state_transition_time: When the job schedule entered its
        previous state; not present while the schedule is in its initial
        active state.
    :type previous_state_transition_time: datetime
    :param schedule: The schedule according to which jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the jobs to be created on this
        schedule.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param execution_info: Information about jobs that have been and will be
        run under this schedule.
    :type execution_info:
        ~azure.batch.models.JobScheduleExecutionInformation
    :param metadata: Name-value pairs associated with the schedule; the
        Batch service assigns no meaning to them.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: Lifetime resource usage statistics for the job schedule;
        may lag by roughly 30 minutes.
    :type stats: ~azure.batch.models.JobScheduleStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }

    def __init__(self, **kwargs):
        super(CloudJobSchedule, self).__init__(**kwargs)
        # Every property is optional: copy each recognised keyword argument
        # onto the instance, defaulting to None when absent.
        for name in ('id', 'display_name', 'url', 'e_tag', 'last_modified',
                     'creation_time', 'state', 'state_transition_time',
                     'previous_state', 'previous_state_transition_time',
                     'schedule', 'job_specification', 'execution_info',
                     'metadata', 'stats'):
            setattr(self, name, kwargs.get(name, None))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class CloudJobSchedulePaged(Paged):
    """A paging container for iterating over a list of
    :class:`CloudJobSchedule` objects.
    """

    _attribute_map = {
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[CloudJobSchedule]'}
    }

    # Paged.__init__ is inherited unchanged; no override is needed.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudJobSchedule(Model):
    """A job schedule that allows recurring jobs by specifying when to run
    jobs and a specification used to create each job (Python 3 keyword-only
    constructor variant).

    Every property is optional and defaults to None; attribute names map to
    the Batch REST API wire names through ``_attribute_map``.

    :param id: Uniquely identifies the schedule within the account.
    :type id: str
    :param display_name: The display name for the schedule.
    :type display_name: str
    :param url: The URL of the job schedule.
    :type url: str
    :param e_tag: The ETag of the job schedule, an opaque string; pass it
        with an Update Job Schedule request so the change only takes effect
        if nobody else has modified the schedule in the meantime.
    :type e_tag: str
    :param last_modified: The last time schedule-level data (job
        specification, recurrence, ...) changed; job-level changes are not
        reflected.
    :type last_modified: datetime
    :param creation_time: The creation time of the job schedule.
    :type creation_time: datetime
    :param state: The current state of the job schedule. Possible values
        include: 'active', 'completed', 'disabled', 'terminating',
        'deleting'
    :type state: str or ~azure.batch.models.JobScheduleState
    :param state_transition_time: When the job schedule entered its current
        state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the job schedule; not
        present while the schedule is in its initial active state. Possible
        values include: 'active', 'completed', 'disabled', 'terminating',
        'deleting'
    :type previous_state: str or ~azure.batch.models.JobScheduleState
    :param previous_state_transition_time: When the job schedule entered its
        previous state; not present while the schedule is in its initial
        active state.
    :type previous_state_transition_time: datetime
    :param schedule: The schedule according to which jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the jobs to be created on this
        schedule.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param execution_info: Information about jobs that have been and will be
        run under this schedule.
    :type execution_info:
        ~azure.batch.models.JobScheduleExecutionInformation
    :param metadata: Name-value pairs associated with the schedule; the
        Batch service assigns no meaning to them.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: Lifetime resource usage statistics for the job schedule;
        may lag by roughly 30 minutes.
    :type stats: ~azure.batch.models.JobScheduleStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }

    def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None, **kwargs) -> None:
        super(CloudJobSchedule, self).__init__(**kwargs)
        # Bind each keyword-only argument to the instance attribute of the
        # same name.
        for name, value in (
                ('id', id), ('display_name', display_name), ('url', url),
                ('e_tag', e_tag), ('last_modified', last_modified),
                ('creation_time', creation_time), ('state', state),
                ('state_transition_time', state_transition_time),
                ('previous_state', previous_state),
                ('previous_state_transition_time',
                 previous_state_transition_time),
                ('schedule', schedule),
                ('job_specification', job_specification),
                ('execution_info', execution_info), ('metadata', metadata),
                ('stats', stats)):
            setattr(self, name, value)
a/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool.py new file mode 100644 index 00000000..8456a060 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudPool(Model): + """A pool in the Azure Batch service. + + :param id: A string that uniquely identifies the pool within the account. + The ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. The + ID is case-preserving and case-insensitive (that is, you may not have two + IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the pool. + :type url: str + :param e_tag: The ETag of the pool. This is an opaque string. You can use + it to detect whether the pool has changed between requests. In particular, + you can be pass the ETag when updating a pool to specify that your changes + should take effect only if nobody else has modified the pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. 
It does not factor in node-level + changes such as a compute node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the pool. + :type creation_time: datetime + :param state: The current state of the pool. Possible values include: + 'active', 'deleting' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All virtual + machines in a pool are the same size. For information about available + sizes of virtual machines in pools, see Choose a VM size for compute nodes + in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. 
This is the timeout for the most recent resize operation. (The + initial sizing when the pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the pool. This property is set only if one or more errors + occurred during the last pool resize, and only when the pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated compute nodes + currently in the pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority compute + nodes currently in the pool. Low-priority compute nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + This property is set only if the pool automatically scales, i.e. + enableAutoScale is true. 
+ :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Specifying this value can reduce the chance of + the requested number of nodes to be allocated in the pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. 
There is a maximum of 10 application package + references on any given pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the pool. This property is populated only if the CloudPool was + retrieved with an expand clause including the 'stats' attribute; otherwise + it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. 
+ :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 
'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudPool, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.allocation_state = kwargs.get('allocation_state', None) + self.allocation_state_transition_time = kwargs.get('allocation_state_transition_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.resize_errors = kwargs.get('resize_errors', None) + self.current_dedicated_nodes = kwargs.get('current_dedicated_nodes', None) + self.current_low_priority_nodes = kwargs.get('current_low_priority_nodes', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = 
kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.auto_scale_run = kwargs.get('auto_scale_run', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_paged.py new file mode 100644 index 00000000..c23eb7cd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudPoolPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudPool ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudPool]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudPoolPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_py3.py new file mode 100644 index 00000000..2a3c79b6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_pool_py3.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudPool(Model): + """A pool in the Azure Batch service. + + :param id: A string that uniquely identifies the pool within the account. + The ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. The + ID is case-preserving and case-insensitive (that is, you may not have two + IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the pool. 
+ :type url: str + :param e_tag: The ETag of the pool. This is an opaque string. You can use + it to detect whether the pool has changed between requests. In particular, + you can be pass the ETag when updating a pool to specify that your changes + should take effect only if nobody else has modified the pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. It does not factor in node-level + changes such as a compute node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the pool. + :type creation_time: datetime + :param state: The current state of the pool. Possible values include: + 'active', 'deleting' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All virtual + machines in a pool are the same size. For information about available + sizes of virtual machines in pools, see Choose a VM size for compute nodes + in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. 
This property + cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This is the timeout for the most recent resize operation. (The + initial sizing when the pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the pool. This property is set only if one or more errors + occurred during the last pool resize, and only when the pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated compute nodes + currently in the pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority compute + nodes currently in the pool. Low-priority compute nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of compute + nodes in the pool. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + This property is set only if the pool automatically scales, i.e. + enableAutoScale is true. + :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Specifying this value can reduce the chance of + the requested number of nodes to be allocated in the pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task specified to run on each compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. 
For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. There is a maximum of 10 application package + references on any given pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the pool. This property is populated only if the CloudPool was + retrieved with an expand clause including the 'stats' attribute; otherwise + it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_errors=None, current_dedicated_nodes: int=None, current_low_priority_nodes: int=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, stats=None, **kwargs) -> None: + 
super(CloudPool, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.allocation_state = allocation_state + self.allocation_state_transition_time = allocation_state_transition_time + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.resize_errors = resize_errors + self.current_dedicated_nodes = current_dedicated_nodes + self.current_low_priority_nodes = current_low_priority_nodes + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.auto_scale_run = auto_scale_run + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata + self.stats = stats diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration.py new file mode 100644 index 00000000..e0772120 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudServiceConfiguration(Model): + """The configuration for nodes in a pool based on the Azure Cloud Services + platform. + + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. The Azure Guest OS family to be installed on + the virtual machines in the pool. Possible values are: + 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. + 3 - OS Family 3, equivalent to Windows Server 2012. + 4 - OS Family 4, equivalent to Windows Server 2012 R2. + 5 - OS Family 5, equivalent to Windows Server 2016. + 6 - OS Family 6, equivalent to Windows Server 2019. For more information, + see Azure Guest OS Releases + (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + :type os_family: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the pool. The default value is * which specifies the + latest operating system version for the specified OS family. 
+ :type os_version: str + """ + + _validation = { + 'os_family': {'required': True}, + } + + _attribute_map = { + 'os_family': {'key': 'osFamily', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CloudServiceConfiguration, self).__init__(**kwargs) + self.os_family = kwargs.get('os_family', None) + self.os_version = kwargs.get('os_version', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration_py3.py new file mode 100644 index 00000000..9ebef281 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_service_configuration_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudServiceConfiguration(Model): + """The configuration for nodes in a pool based on the Azure Cloud Services + platform. + + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. The Azure Guest OS family to be installed on + the virtual machines in the pool. Possible values are: + 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. + 3 - OS Family 3, equivalent to Windows Server 2012. + 4 - OS Family 4, equivalent to Windows Server 2012 R2. + 5 - OS Family 5, equivalent to Windows Server 2016. + 6 - OS Family 6, equivalent to Windows Server 2019. 
For more information, + see Azure Guest OS Releases + (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + :type os_family: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the pool. The default value is * which specifies the + latest operating system version for the specified OS family. + :type os_version: str + """ + + _validation = { + 'os_family': {'required': True}, + } + + _attribute_map = { + 'os_family': {'key': 'osFamily', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, + } + + def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None: + super(CloudServiceConfiguration, self).__init__(**kwargs) + self.os_family = os_family + self.os_version = os_version diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_task.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task.py new file mode 100644 index 00000000..df9e3677 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task.py @@ -0,0 +1,208 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch task. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. 
Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the task within the job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the task. + :type url: str + :param e_tag: The ETag of the task. This is an opaque string. You can use + it to detect whether the task has changed between requests. In particular, + you can pass the ETag when updating a task to specify that your changes + should take effect only if nobody else has modified the task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param creation_time: The creation time of the task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the task. 
This property is + not set if the task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the task entered + its previous state. This property is not set if the task is in its initial + Active state. + :type previous_state_transition_time: datetime + :param command_line: The command line of the task. For multi-instance + tasks, the command line is executed as the primary task, after the primary + task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + task runs. If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. 
+ :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the compute node on which the task + ran. 
+ :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the task. + :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The tasks that this task depends on. This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. 
For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': 
'[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, **kwargs): + super(CloudTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.exit_conditions = kwargs.get('exit_conditions', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.affinity_info = kwargs.get('affinity_info', None) + self.constraints = kwargs.get('constraints', None) + self.user_identity = kwargs.get('user_identity', None) + self.execution_info = kwargs.get('execution_info', None) + self.node_info = kwargs.get('node_info', None) + self.multi_instance_settings = kwargs.get('multi_instance_settings', None) + self.stats = kwargs.get('stats', None) + self.depends_on = kwargs.get('depends_on', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result.py new file mode 100644 index 00000000..c592b348 --- 
/dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, **kwargs): + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result_py3.py new file mode 100644 index 00000000..f21e7260 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_list_subtasks_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_paged.py new file mode 100644 index 00000000..3d8ef774 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudTaskPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudTask ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudTask]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudTaskPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_py3.py new file mode 100644 index 00000000..95928544 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/cloud_task_py3.py @@ -0,0 +1,208 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch task. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. 
The best practice for long running tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the task within the job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the task. + :type url: str + :param e_tag: The ETag of the task. This is an opaque string. You can use + it to detect whether the task has changed between requests. In particular, + you can pass the ETag when updating a task to specify that your changes + should take effect only if nobody else has modified the task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param creation_time: The creation time of the task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the task. This property is + not set if the task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the task entered + its previous state. This property is not set if the task is in its initial + Active state. 
+ :type previous_state_transition_time: datetime + :param command_line: The command line of the task. For multi-instance + tasks, the command line is executed as the primary task, after the primary + task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + task runs. If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. 
If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the compute node on which the task + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the task. + :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The tasks that this task depends on. 
This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def 
__init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(CloudTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.exit_conditions = exit_conditions + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.execution_info = execution_info + self.node_info = node_info + self.multi_instance_settings = multi_instance_settings + self.stats = stats + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node.py new file mode 100644 index 00000000..1b511963 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node.py @@ -0,0 +1,157 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A compute node in the Batch service. + + :param id: The ID of the compute node. Every node that is added to a pool + is assigned a unique ID. Whenever a node is removed from a pool, all of + its local files are deleted, and the ID is reclaimed and could be reused + for new nodes. + :type id: str + :param url: The URL of the compute node. + :type url: str + :param state: The current state of the compute node. The low-priority node + has been preempted. Tasks which were running on the node when it was + preempted will be rescheduled when another node becomes available. + Possible values include: 'idle', 'rebooting', 'reimaging', 'running', + 'unusable', 'creating', 'starting', 'waitingForStartTask', + 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the compute node is available for task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the compute node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the compute node was + started. This property may not be present if the node state is unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this compute node was allocated + to the pool. 
This is the time when the node was initially allocated and + doesn't change once set. It is not updated when the node is service healed + or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other compute nodes can use to + communicate with this compute node. Every node that is added to a pool is + assigned a unique IP address. Whenever a node is removed from a pool, all + of its local files are deleted, and the IP address is reclaimed and could + be reused for new nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a task + to request that the task be scheduled on this node. Note that this is just + a soft affinity. If the target node is busy or unavailable at the time the + task is scheduled, then the task will be scheduled elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the compute node. + For information about available sizes of virtual machines in pools, see + Choose a VM size for compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of job tasks completed on the + compute node. This includes Job Manager tasks and normal tasks, but not + Job Preparation, Job Release or Start tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running job + tasks on the compute node. This includes Job Manager tasks and normal + tasks, but not Job Preparation, Job Release or Start tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of job tasks which + completed successfully (with exitCode 0) on the compute node. This + includes Job Manager tasks and normal tasks, but not Job Preparation, Job + Release or Start tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of tasks whose state has recently changed. 
+ This property is present only if at least one task has run on this node + since it was assigned to the pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The task specified to run on the compute node as it + joins the pool. + :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start task on the compute node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of certificates installed on the + compute node. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the compute node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this compute node is a dedicated node. If + false, the node is a low-priority node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the compute + node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the node agent version and the + time the node upgraded to a new version. 
+ :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, **kwargs): + super(ComputeNode, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.scheduling_state = kwargs.get('scheduling_state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.last_boot_time = kwargs.get('last_boot_time', None) + self.allocation_time = kwargs.get('allocation_time', None) + self.ip_address = kwargs.get('ip_address', None) + 
self.affinity_id = kwargs.get('affinity_id', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_tasks_run = kwargs.get('total_tasks_run', None) + self.running_tasks_count = kwargs.get('running_tasks_count', None) + self.total_tasks_succeeded = kwargs.get('total_tasks_succeeded', None) + self.recent_tasks = kwargs.get('recent_tasks', None) + self.start_task = kwargs.get('start_task', None) + self.start_task_info = kwargs.get('start_task_info', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.errors = kwargs.get('errors', None) + self.is_dedicated = kwargs.get('is_dedicated', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) + self.node_agent_info = kwargs.get('node_agent_info', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options.py new file mode 100644 index 00000000..89020475 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options_py3.py new file mode 100644 index 00000000..dab4040b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_add_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options.py new file mode 100644 index 00000000..4874a98a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options_py3.py new file mode 100644 index 00000000..88217b93 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_delete_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options.py new file mode 100644 index 00000000..92bf2911 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options_py3.py new file mode 100644 index 00000000..0432c5db --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_disable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options.py new file mode 100644 index 00000000..905e3e34 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options_py3.py new file mode 100644 index 00000000..4ef5d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_enable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration.py new file mode 100644 index 00000000..922c5c10 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the compute node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the compute node. + :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = kwargs.get('inbound_endpoints', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration_py3.py new file mode 100644 index 00000000..72dc202e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_endpoint_configuration_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the compute node. 
+ + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the compute node. + :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, *, inbound_endpoints, **kwargs) -> None: + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = inbound_endpoints diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error.py new file mode 100644 index 00000000..e02a6681 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a compute node. + + :param code: An identifier for the compute node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the compute node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + compute node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.error_details = kwargs.get('error_details', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error_py3.py new file mode 100644 index 00000000..53a6871b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a compute node. + + :param code: An identifier for the compute node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the compute node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + compute node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None: + super(ComputeNodeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.error_details = error_details diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options.py new file mode 100644 index 00000000..6218d444 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options_py3.py new file mode 100644 index 00000000..de6284b3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options.py new file mode 100644 index 00000000..20af5558 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options_py3.py new file mode 100644 index 00000000..d79ce622 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_desktop_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options.py new file mode 100644 index 00000000..9c01ed5f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options_py3.py new file mode 100644 index 00000000..2d7987ab --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result.py new file mode 100644 index 00000000..2d4e378c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the compute node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + compute node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = kwargs.get('remote_login_ip_address', None) + self.remote_login_port = kwargs.get('remote_login_port', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result_py3.py new file mode 100644 index 00000000..c13ace1c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_get_remote_login_settings_result_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the compute node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + compute node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = remote_login_ip_address + self.remote_login_port = remote_login_port diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information.py new file mode 100644 index 00000000..9c6a3421 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the compute node on which a task ran. + + :param affinity_id: An identifier for the compute node on which the task + ran, which can be passed when adding a task to request that the task be + scheduled on this compute node. + :type affinity_id: str + :param node_url: The URL of the node on which the task ran. . + :type node_url: str + :param pool_id: The ID of the pool on which the task ran. + :type pool_id: str + :param node_id: The ID of the node on which the task ran. 
+ :type node_id: str + :param task_root_directory: The root directory of the task on the compute + node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the task + on the compute node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) + self.node_url = kwargs.get('node_url', None) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information_py3.py new file mode 100644 index 00000000..7c82bde1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_information_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the compute node on which a task ran. + + :param affinity_id: An identifier for the compute node on which the task + ran, which can be passed when adding a task to request that the task be + scheduled on this compute node. + :type affinity_id: str + :param node_url: The URL of the node on which the task ran. . + :type node_url: str + :param pool_id: The ID of the pool on which the task ran. + :type pool_id: str + :param node_id: The ID of the node on which the task ran. + :type node_id: str + :param task_root_directory: The root directory of the task on the compute + node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the task + on the compute node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str=None, node_url: str=None, pool_id: str=None, node_id: str=None, task_root_directory: str=None, task_root_directory_url: str=None, **kwargs) -> None: + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id + self.node_url = node_url + self.pool_id = pool_id + self.node_id = node_id + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options.py new file mode 100644 index 00000000..b3cf782f --- /dev/null 
+++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options_py3.py new file mode 100644 index 00000000..e046429d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_paged.py 
b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_paged.py new file mode 100644 index 00000000..26f41dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ComputeNodePaged(Paged): + """ + A paging container for iterating over a list of :class:`ComputeNode ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ComputeNode]'} + } + + def __init__(self, *args, **kwargs): + + super(ComputeNodePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_py3.py new file mode 100644 index 00000000..be5eebf2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_py3.py @@ -0,0 +1,157 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A compute node in the Batch service. + + :param id: The ID of the compute node. Every node that is added to a pool + is assigned a unique ID. Whenever a node is removed from a pool, all of + its local files are deleted, and the ID is reclaimed and could be reused + for new nodes. + :type id: str + :param url: The URL of the compute node. + :type url: str + :param state: The current state of the compute node. The low-priority node + has been preempted. Tasks which were running on the node when it was + preempted will be rescheduled when another node becomes available. + Possible values include: 'idle', 'rebooting', 'reimaging', 'running', + 'unusable', 'creating', 'starting', 'waitingForStartTask', + 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the compute node is available for task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the compute node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the compute node was + started. This property may not be present if the node state is unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this compute node was allocated + to the pool. This is the time when the node was initially allocated and + doesn't change once set. It is not updated when the node is service healed + or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other compute nodes can use to + communicate with this compute node. Every node that is added to a pool is + assigned a unique IP address. 
Whenever a node is removed from a pool, all + of its local files are deleted, and the IP address is reclaimed and could + be reused for new nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a task + to request that the task be scheduled on this node. Note that this is just + a soft affinity. If the target node is busy or unavailable at the time the + task is scheduled, then the task will be scheduled elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the compute node. + For information about available sizes of virtual machines in pools, see + Choose a VM size for compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of job tasks completed on the + compute node. This includes Job Manager tasks and normal tasks, but not + Job Preparation, Job Release or Start tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running job + tasks on the compute node. This includes Job Manager tasks and normal + tasks, but not Job Preparation, Job Release or Start tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of job tasks which + completed successfully (with exitCode 0) on the compute node. This + includes Job Manager tasks and normal tasks, but not Job Preparation, Job + Release or Start tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of tasks whose state has recently changed. + This property is present only if at least one task has run on this node + since it was assigned to the pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The task specified to run on the compute node as it + joins the pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start task on the compute node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of certificates installed on the + compute node. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the compute node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this compute node is a dedicated node. If + false, the node is a low-priority node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the compute + node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the node agent version and the + time the node upgraded to a new version. 
+ :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, **kwargs) -> 
None: + super(ComputeNode, self).__init__(**kwargs) + self.id = id + self.url = url + self.state = state + self.scheduling_state = scheduling_state + self.state_transition_time = state_transition_time + self.last_boot_time = last_boot_time + self.allocation_time = allocation_time + self.ip_address = ip_address + self.affinity_id = affinity_id + self.vm_size = vm_size + self.total_tasks_run = total_tasks_run + self.running_tasks_count = running_tasks_count + self.total_tasks_succeeded = total_tasks_succeeded + self.recent_tasks = recent_tasks + self.start_task = start_task + self.start_task_info = start_task_info + self.certificate_references = certificate_references + self.errors = errors + self.is_dedicated = is_dedicated + self.endpoint_configuration = endpoint_configuration + self.node_agent_info = node_agent_info diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options.py new file mode 100644 index 00000000..182c563e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options_py3.py new file mode 100644 index 00000000..97e8cb41 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reboot_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options.py new file mode 100644 index 00000000..8ec6e55f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright 
(c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options_py3.py new file mode 100644 index 00000000..dcff3ee8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_reimage_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options.py new file mode 100644 index 00000000..ed1f9548 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options_py3.py new file mode 100644 index 00000000..81e45b6c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_update_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options.py new file mode 100644 index 00000000..071b712e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options_py3.py new file mode 100644 index 00000000..bac1dad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_upload_batch_service_logs_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user.py new file mode 100644 index 00000000..495b246e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user account for RDP or SSH access on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the account. + :type name: str + :param is_admin: Whether the account should be an administrator on the + compute node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). 
+ :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_admin = kwargs.get('is_admin', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.password = kwargs.get('password', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user_py3.py new file mode 100644 index 00000000..a88a6ab2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/compute_node_user_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user account for RDP or SSH access on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the account. + :type name: str + :param is_admin: Whether the account should be an administrator on the + compute node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the account should expire. 
If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None: + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = name + self.is_admin = is_admin + self.expiry_time = expiry_time + self.password = password + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2018_12_01/models/container_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/container_configuration.py new file mode 100644 index 00000000..f4b932c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/container_configuration.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container image names. + This is the full image reference, as would be specified to "docker pull". + An image will be sourced from the default Docker registry unless the image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. 
+ :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, **kwargs): + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = kwargs.get('container_image_names', None) + self.container_registries = kwargs.get('container_registries', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/container_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/container_configuration_py3.py new file mode 100644 index 00000000..f65b047e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/container_configuration_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container image names. 
+ This is the full image reference, as would be specified to "docker pull". + An image will be sourced from the default Docker registry unless the image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. + :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None: + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = container_image_names + self.container_registries = container_registries diff --git a/azext/generated/sdk/batch/v2018_12_01/models/container_registry.py b/azext/generated/sdk/batch/v2018_12_01/models/container_registry.py new file mode 100644 index 00000000..18203196 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/container_registry.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = kwargs.get('registry_server', None) + self.user_name = kwargs.get('user_name', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/container_registry_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/container_registry_py3.py new file mode 100644 index 00000000..eb47f9e5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/container_registry_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None: + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = registry_server + self.user_name = user_name + self.password = password diff --git a/azext/generated/sdk/batch/v2018_12_01/models/data_disk.py b/azext/generated/sdk/batch/v2018_12_01/models/data_disk.py new file mode 100644 index 00000000..d2f80dad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/data_disk.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to compute nodes + in the pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. + :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage account type to be used for the + data disk. If omitted, the default is "standard_lrs". 
Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, **kwargs): + super(DataDisk, self).__init__(**kwargs) + self.lun = kwargs.get('lun', None) + self.caching = kwargs.get('caching', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.storage_account_type = kwargs.get('storage_account_type', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/data_disk_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/data_disk_py3.py new file mode 100644 index 00000000..2af0ce19 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/data_disk_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to compute nodes + in the pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. 
+ :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage account type to be used for the + data disk. If omitted, the default is "standard_lrs". Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: + super(DataDisk, self).__init__(**kwargs) + self.lun = lun + self.caching = caching + self.disk_size_gb = disk_size_gb + self.storage_account_type = storage_account_type diff --git a/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error.py b/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error.py new file mode 100644 index 00000000..dbb14c3c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a certificate. + + :param code: An identifier for the certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + certificate deletion error. This list includes details such as the active + pools and nodes referencing this certificate. However, if a large number + of resources reference the certificate, the list contains only about the + first hundred. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error_py3.py new file mode 100644 index 00000000..246a12a0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/delete_certificate_error_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a certificate. + + :param code: An identifier for the certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + certificate deletion error. This list includes details such as the active + pools and nodes referencing this certificate. However, if a large number + of resources reference the certificate, the list contains only about the + first hundred. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2018_12_01/models/environment_setting.py b/azext/generated/sdk/batch/v2018_12_01/models/environment_setting.py new file mode 100644 index 00000000..e46a6e5e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/environment_setting.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a task process. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/environment_setting_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/environment_setting_py3.py new file mode 100644 index 00000000..facb8f44 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/environment_setting_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a task process. 
+ + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str=None, **kwargs) -> None: + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/error_message.py b/azext/generated/sdk/batch/v2018_12_01/models/error_message.py new file mode 100644 index 00000000..bbdf64f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/error_message.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ErrorMessage, self).__init__(**kwargs) + self.lang = kwargs.get('lang', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/error_message_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/error_message_py3.py new file mode 100644 index 00000000..a84934fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/error_message_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None: + super(ErrorMessage, self).__init__(**kwargs) + self.lang = lang + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping.py new file mode 100644 index 00000000..57977e3d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping_py3.py new file mode 100644 index 00000000..5a5176bb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_mapping_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, code: int, exit_options, **kwargs) -> None: + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = code + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping.py new file mode 100644 index 00000000..999272ae --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping_py3.py new file mode 100644 index 00000000..9a3da0bd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_code_range_mapping_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None: + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = start + self.end = end + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions.py new file mode 100644 index 00000000..66085863 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the task completes. + + :param exit_codes: A list of individual task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + task fails to start due to an error. 
+ :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the task fails + with an exit condition not covered by any of the other properties. This + value is used if the task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = kwargs.get('exit_codes', None) + self.exit_code_ranges = kwargs.get('exit_code_ranges', None) + self.pre_processing_error = kwargs.get('pre_processing_error', None) + self.file_upload_error = kwargs.get('file_upload_error', None) + self.default = kwargs.get('default', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions_py3.py new file mode 100644 index 00000000..65cd5aaa --- 
/dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_conditions_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the task completes. + + :param exit_codes: A list of individual task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + task fails to start due to an error. + :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the task fails + with an exit condition not covered by any of the other properties. 
This + value is used if the task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None: + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = exit_codes + self.exit_code_ranges = exit_code_ranges + self.pre_processing_error = pre_processing_error + self.file_upload_error = file_upload_error + self.default = default diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_options.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_options.py new file mode 100644 index 00000000..f7f5e7b1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_options.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the job containing the task, if + the task completes with the given exit condition and the job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + job's onTaskFailed property is noaction, then specifying this property + returns an error and the add task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + tasks that depend on this task. The default is 'satisfy' for exit code 0, + and 'block' for all other exit conditions. If the job's + usesTaskDependencies property is set to false, then specifying the + dependencyAction property returns an error and the add task request fails + with an invalid property value error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). 
Possible values + include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, **kwargs): + super(ExitOptions, self).__init__(**kwargs) + self.job_action = kwargs.get('job_action', None) + self.dependency_action = kwargs.get('dependency_action', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/exit_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/exit_options_py3.py new file mode 100644 index 00000000..0867bbea --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/exit_options_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the job containing the task, if + the task completes with the given exit condition and the job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + job's onTaskFailed property is noaction, then specifying this property + returns an error and the add task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). 
Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + tasks that depend on this task. The default is 'satisfy' for exit code 0, + and 'block' for all other exit conditions. If the job's + usesTaskDependencies property is set to false, then specifying the + dependencyAction property returns an error and the add task request fails + with an invalid property value error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). Possible values + include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None: + super(ExitOptions, self).__init__(**kwargs) + self.job_action = job_action + self.dependency_action = dependency_action diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options.py new file mode 100644 index 00000000..7522e806 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options_py3.py new file mode 100644 index 00000000..62291d14 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_compute_node_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options.py new file mode 100644 index 00000000..054babe8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options_py3.py new file mode 100644 index 00000000..7d783006 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_delete_from_task_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options.py new file mode 100644 index 00000000..9a6e3fb7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options_py3.py new file mode 100644 index 00000000..ab3dc34f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_compute_node_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options.py new file mode 100644 index 00000000..19bd5cde --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options_py3.py new file mode 100644 index 00000000..30ec6583 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_from_task_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options.py new file mode 100644 index 00000000..bf283d1d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options_py3.py new file mode 100644 index 00000000..69a90184 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_compute_node_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options.py new file mode 100644 index 00000000..836387d3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options_py3.py new file mode 100644 index 00000000..73996895 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_get_properties_from_task_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options.py new file mode 100644 index 00000000..dc32df46 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options_py3.py new file mode 100644 index 00000000..e475dcde --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_compute_node_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options.py b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options.py new file mode 100644 index 00000000..86728b25 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options_py3.py new file mode 100644 index 00000000..354c4869 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_list_from_task_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_properties.py b/azext/generated/sdk/batch/v2018_12_01/models/file_properties.py new file mode 100644 index 00000000..3cef0000 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_properties.py @@ -0,0 
+1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux compute nodes. + :type creation_time: datetime + :param last_modified: Required. The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux compute nodes. 
+ :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileProperties, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/file_properties_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/file_properties_py3.py new file mode 100644 index 00000000..71c2a8e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/file_properties_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux compute nodes. + :type creation_time: datetime + :param last_modified: Required. 
The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux compute nodes. + :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None: + super(FileProperties, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.content_length = content_length + self.content_type = content_type + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2018_12_01/models/image_reference.py b/azext/generated/sdk/batch/v2018_12_01/models/image_reference.py new file mode 100644 index 00000000..36f0cb14 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/image_reference.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace image or a custom + Azure Virtual Machine image. To get the list of all Azure Marketplace image + references verified by Azure Batch, see the 'List node agent SKUs' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + image. A value of 'latest' can be specified to select the latest version + of an image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + virtual machine image. Compute nodes of the pool will be created using + this custom image. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The virtual machine image must be in the same region and subscription as + the Azure Batch account. For more details, see + https://docs.microsoft.com/azure/batch/batch-custom-images. 
+ :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ImageReference, self).__init__(**kwargs) + self.publisher = kwargs.get('publisher', None) + self.offer = kwargs.get('offer', None) + self.sku = kwargs.get('sku', None) + self.version = kwargs.get('version', None) + self.virtual_machine_image_id = kwargs.get('virtual_machine_image_id', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/image_reference_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/image_reference_py3.py new file mode 100644 index 00000000..1a40536e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/image_reference_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace image or a custom + Azure Virtual Machine image. To get the list of all Azure Marketplace image + references verified by Azure Batch, see the 'List node agent SKUs' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + image. For example, Canonical or MicrosoftWindowsServer. 
+ :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + image. A value of 'latest' can be specified to select the latest version + of an image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + virtual machine image. Compute nodes of the pool will be created using + this custom image. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The virtual machine image must be in the same region and subscription as + the Azure Batch account. For more details, see + https://docs.microsoft.com/azure/batch/batch-custom-images. 
+ :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None: + super(ImageReference, self).__init__(**kwargs) + self.publisher = publisher + self.offer = offer + self.sku = sku + self.version = version + self.virtual_machine_image_id = virtual_machine_image_id diff --git a/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint.py b/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint.py new file mode 100644 index 00000000..8fd064a9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the compute + node. 
+ :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the compute node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. + :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(InboundEndpoint, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.public_ip_address = kwargs.get('public_ip_address', None) + self.public_fqdn = kwargs.get('public_fqdn', None) + self.frontend_port = kwargs.get('frontend_port', None) + self.backend_port = kwargs.get('backend_port', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint_py3.py new file mode 100644 index 00000000..004e1577 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/inbound_endpoint_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the compute + node. + :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the compute node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. 
+ :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None: + super(InboundEndpoint, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.public_ip_address = public_ip_address + self.public_fqdn = public_fqdn + self.frontend_port = frontend_port + self.backend_port = backend_port diff --git a/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool.py b/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool.py new file mode 100644 index 00000000..bf3209e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT pool that can be used to address specific ports on compute + nodes in a Batch pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the compute node. This + must be unique within a Batch pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. 
Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. + :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, **kwargs): + super(InboundNATPool, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.backend_port = kwargs.get('backend_port', None) + self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) + self.frontend_port_range_end = kwargs.get('frontend_port_range_end', 
None) + self.network_security_group_rules = kwargs.get('network_security_group_rules', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool_py3.py new file mode 100644 index 00000000..ed91e373 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/inbound_nat_pool_py3.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT pool that can be used to address specific ports on compute + nodes in a Batch pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the compute node. This + must be unique within a Batch pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. 
If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual compute nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. 
+ :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None: + super(InboundNATPool, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.backend_port = backend_port + self.frontend_port_range_start = frontend_port_range_start + self.frontend_port_range_end = frontend_port_range_end + self.network_security_group_rules = network_security_group_rules diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_add_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_add_options.py new file mode 100644 index 00000000..bdcf7969 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_add_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_add_options_py3.py new file mode 100644 index 00000000..9633e748 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter.py new file mode 100644 index 00000000..659c6975 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the job within the + account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the job. 
The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager task to be launched when + the job is started. If the job does not specify a Job Manager task, the + user must explicitly add tasks to the job. If the job does specify a Job + Manager task, the Batch service creates the Job Manager task when the job + is created, and will try to schedule the Job Manager task before + scheduling other tasks in the job. The Job Manager task's typical purpose + is to control and/or monitor job execution, for example by deciding what + additional tasks to run, determining when the work is complete, etc. + (However, a Job Manager task is not restricted to these activities - it is + a fully-fledged task in the system and perform whatever actions are + required for the job.) For example, a Job Manager task might download a + file specified as a parameter, analyze the contents of that file and + submit additional tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task. If a job has a Job + Preparation task, the Batch service will run the Job Preparation task on a + compute node before starting any tasks of that job on that compute node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task. A Job Release task cannot + be specified without also specifying a Job Preparation task for the job. 
+ The Batch service runs the Job Release task on the compute nodes that have + run the Job Preparation task. The primary purpose of the Job Release task + is to undo changes to compute nodes made by the Job Preparation task. + Example activities include deleting local files, or shutting down services + that were started as part of job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all tasks in + the job (including the Job Manager, Job Preparation and Job Release + tasks). Individual tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. Note that if a job + contains no tasks, then all tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the job properties to set + onAllTasksComplete to terminatejob once you have finished adding tasks. + The default is noaction. Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task in the job fails. A task is considered to have failed if has a + failureInfo. 
A failureInfo is set if the task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 
'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, **kwargs): + super(JobAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.metadata = kwargs.get('metadata', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.network_configuration = kwargs.get('network_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter_py3.py new file mode 100644 index 00000000..08c329c3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_add_parameter_py3.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the job within the + account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the job. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager task to be launched when + the job is started. If the job does not specify a Job Manager task, the + user must explicitly add tasks to the job. If the job does specify a Job + Manager task, the Batch service creates the Job Manager task when the job + is created, and will try to schedule the Job Manager task before + scheduling other tasks in the job. The Job Manager task's typical purpose + is to control and/or monitor job execution, for example by deciding what + additional tasks to run, determining when the work is complete, etc. + (However, a Job Manager task is not restricted to these activities - it is + a fully-fledged task in the system and perform whatever actions are + required for the job.) For example, a Job Manager task might download a + file specified as a parameter, analyze the contents of that file and + submit additional tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task. 
If a job has a Job + Preparation task, the Batch service will run the Job Preparation task on a + compute node before starting any tasks of that job on that compute node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task. A Job Release task cannot + be specified without also specifying a Job Preparation task for the job. + The Batch service runs the Job Release task on the compute nodes that have + run the Job Preparation task. The primary purpose of the Job Release task + is to undo changes to compute nodes made by the Job Preparation task. + Example activities include deleting local files, or shutting down services + that were started as part of job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all tasks in + the job (including the Job Manager, Job Preparation and Job Release + tasks). Individual tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. Note that if a job + contains no tasks, then all tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the job properties to set + onAllTasksComplete to terminatejob once you have finished adding tasks. + The default is noaction. 
Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task in the job fails. A task is considered to have failed if has a + failureInfo. A failureInfo is set if the task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None: + super(JobAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = 
on_all_tasks_complete + self.on_task_failure = on_task_failure + self.metadata = metadata + self.uses_task_dependencies = uses_task_dependencies + self.network_configuration = network_configuration diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_constraints.py b/azext/generated/sdk/batch/v2018_12_01/models/job_constraints.py new file mode 100644 index 00000000..070a37ae --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_constraints.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a job. + + :param max_wall_clock_time: The maximum elapsed time that the job may run, + measured from the time the job is created. If the job does not complete + within the time limit, the Batch service terminates it and any tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the job may run. + :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a task up + to 4 times (one initial try and 3 retries). 
If the maximum retry count is + 0, the Batch service does not retry tasks. If the maximum retry count is + -1, the Batch service retries tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_constraints_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_constraints_py3.py new file mode 100644 index 00000000..06340ebe --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_constraints_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a job. + + :param max_wall_clock_time: The maximum elapsed time that the job may run, + measured from the time the job is created. If the job does not complete + within the time limit, the Batch service terminates it and any tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the job may run. 
+ :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a task up + to 4 times (one initial try and 3 retries). If the maximum retry count is + 0, the Batch service does not retry tasks. If the maximum retry count is + -1, the Batch service retries tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options.py new file mode 100644 index 00000000..a537b55e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options_py3.py new file mode 100644 index 00000000..821db0e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options.py new file mode 100644 index 00000000..c6694516 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options_py3.py new file mode 100644 index 00000000..4b077714 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter.py new file mode 100644 index 00000000..d86c965f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a job. 
+ + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active tasks associated + with the job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, **kwargs): + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = kwargs.get('disable_tasks', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter_py3.py new file mode 100644 index 00000000..fd99f78e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_disable_parameter_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a job. + + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active tasks associated + with the job. 
Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, *, disable_tasks, **kwargs) -> None: + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = disable_tasks diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options.py new file mode 100644 index 00000000..182f2b04 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options_py3.py new file mode 100644 index 00000000..47695f37 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information.py new file mode 100644 index 00000000..28f5a31d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the job. This is the time + at which the job was created. + :type start_time: datetime + :param end_time: The completion time of the job. This property is set only + if the job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the pool to which this job is assigned. This + element contains the actual pool where the job is assigned. When you get + job details from the service, they also contain a poolInfo element, which + contains the pool configuration data from when the job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the job ran on an + auto pool, and this property contains the ID of that auto pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the job. This property is not set if there was no error + starting the job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the job ended. + This property is set only if the job is in the completed state. If the + Batch service terminates the job, it sets the reason as follows: + JMComplete - the Job Manager task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the job reached its maxWallClockTime + constraint. TerminateJobSchedule - the job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the job's onAllTasksComplete + attribute is set to terminatejob, and all tasks in the job are complete. 
+ TaskFailed - the job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a task in the job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.pool_id = kwargs.get('pool_id', None) + self.scheduling_error = kwargs.get('scheduling_error', None) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information_py3.py new file mode 100644 index 00000000..436d2990 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_execution_information_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the job. This is the time + at which the job was created. + :type start_time: datetime + :param end_time: The completion time of the job. This property is set only + if the job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the pool to which this job is assigned. This + element contains the actual pool where the job is assigned. When you get + job details from the service, they also contain a poolInfo element, which + contains the pool configuration data from when the job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the job ran on an + auto pool, and this property contains the ID of that auto pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the job. This property is not set if there was no error + starting the job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the job ended. + This property is set only if the job is in the completed state. If the + Batch service terminates the job, it sets the reason as follows: + JMComplete - the Job Manager task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the job reached its maxWallClockTime + constraint. TerminateJobSchedule - the job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the job's onAllTasksComplete + attribute is set to terminatejob, and all tasks in the job are complete. 
+ TaskFailed - the job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a task in the job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, start_time, end_time=None, pool_id: str=None, scheduling_error=None, terminate_reason: str=None, **kwargs) -> None: + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.pool_id = pool_id + self.scheduling_error = scheduling_error + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..a8f7e849 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..2092bbd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_options.py new file mode 100644 index 00000000..62d47959 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_options_py3.py new file mode 100644 index 00000000..9ed21fc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options.py new file mode 100644 index 00000000..603d79ce --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options_py3.py new file mode 100644 index 00000000..b109e59e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_get_task_counts_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options.py new file mode 100644 index 00000000..7f95aaf7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options_py3.py new file mode 100644 index 00000000..eb606478 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_from_job_schedule_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_options.py new file mode 100644 index 00000000..b9d34191 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_options_py3.py new file mode 100644 index 00000000..f7787cd7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options.py new file mode 100644 index 00000000..443ebba5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options_py3.py new file mode 100644 index 00000000..a7353629 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_list_preparation_and_release_task_status_options_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task.py b/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task.py new file mode 100644 index 00000000..38d54ac0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager task. + + The Job Manager task is automatically started when the job is created. The + Batch service tries to schedule the Job Manager task before any other tasks + in the job. 
When shrinking a pool, the Batch service tries to preserve + compute nodes where Job Manager tasks are running for as long as possible + (that is, nodes running 'normal' tasks are removed before nodes running Job + Manager tasks). When a Job Manager task fails and needs to be restarted, + the system tries to schedule it at the highest priority. If there are no + idle nodes available, the system may terminate one of the running tasks in + the pool and return it to the queue in order to make room for the Job + Manager task to restart. Note that a Job Manager task in one job does not + have priority over tasks in other jobs. Across jobs, only job level + priorities are observed. For example, if a Job Manager in a priority 0 job + needs to be restarted, it will not displace tasks of a priority 1 job. + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + task within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager task. 
It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager task runs. If the pool that will run this task has + containerConfiguration set, this must be set as well. If the pool that + will run this task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all task environment variables are mapped + into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. 
This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager task + signifies completion of the entire job. If true, when the Job Manager task + completes, the Batch service marks the job as complete. If any tasks are + still running at this time (other than Job Release), those tasks are + terminated. If false, the completion of the Job Manager task does not + affect the job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the job, or have a client or + user terminate the job explicitly. An example of this is if the Job + Manager creates a set of tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control job lifetime, + and using the Job Manager task only to create the tasks for the job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager task requires exclusive use + of the compute node where it runs. If true, no other tasks will run on the + same compute node for as long as the Job Manager is running. If false, + other tasks can run simultaneously with the Job Manager on a compute node. + The Job Manager task counts normally against the node's concurrent task + limit, so this is only relevant if the node allows multiple concurrent + tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager task may run on a + low-priority compute node. The default value is true. + :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobManagerTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.kill_job_on_completion = 
kwargs.get('kill_job_on_completion', None) + self.user_identity = kwargs.get('user_identity', None) + self.run_exclusive = kwargs.get('run_exclusive', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) + self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task_py3.py new file mode 100644 index 00000000..668b182b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_manager_task_py3.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager task. + + The Job Manager task is automatically started when the job is created. The + Batch service tries to schedule the Job Manager task before any other tasks + in the job. When shrinking a pool, the Batch service tries to preserve + compute nodes where Job Manager tasks are running for as long as possible + (that is, nodes running 'normal' tasks are removed before nodes running Job + Manager tasks). When a Job Manager task fails and needs to be restarted, + the system tries to schedule it at the highest priority. 
If there are no + idle nodes available, the system may terminate one of the running tasks in + the pool and return it to the queue in order to make room for the Job + Manager task to restart. Note that a Job Manager task in one job does not + have priority over tasks in other jobs. Across jobs, only job level + priorities are observed. For example, if a Job Manager in a priority 0 job + needs to be restarted, it will not displace tasks of a priority 1 job. + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + task within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager task. It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. 
If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager task runs. If the pool that will run this task has + containerConfiguration set, this must be set as well. If the pool that + will run this task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all task environment variables are mapped + into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. 
+ :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager task + signifies completion of the entire job. If true, when the Job Manager task + completes, the Batch service marks the job as complete. If any tasks are + still running at this time (other than Job Release), those tasks are + terminated. If false, the completion of the Job Manager task does not + affect the job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the job, or have a client or + user terminate the job explicitly. An example of this is if the Job + Manager creates a set of tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control job lifetime, + and using the Job Manager task only to create the tasks for the job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager task + runs. If omitted, the task runs as a non-administrative user unique to the + task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager task requires exclusive use + of the compute node where it runs. If true, no other tasks will run on the + same compute node for as long as the Job Manager is running. If false, + other tasks can run simultaneously with the Job Manager on a compute node. 
+ The Job Manager task counts normally against the node's concurrent task + limit, so this is only relevant if the node allows multiple concurrent + tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager task may run on a + low-priority compute node. The default value is true. 
+ :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None: + super(JobManagerTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.constraints = constraints + self.kill_job_on_completion = 
kill_job_on_completion + self.user_identity = user_identity + self.run_exclusive = run_exclusive + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings + self.allow_low_priority_node = allow_low_priority_node diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration.py new file mode 100644 index 00000000..b7dee32c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which nodes running tasks from the job will join for the + duration of the task. This is only supported for jobs running on + VirtualMachineConfiguration pools. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes which will run tasks from the + job. 
For more details, see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration_py3.py new file mode 100644 index 00000000..d47cbb6d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_network_configuration_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which nodes running tasks from the job will join for the + duration of the task. This is only supported for jobs running on + VirtualMachineConfiguration pools. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. 
The specified subnet should have enough free IP + addresses to accommodate the number of nodes which will run tasks from the + job. For more details, see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, *, subnet_id: str, **kwargs) -> None: + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options.py new file mode 100644 index 00000000..9fdbb4f3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options_py3.py new file mode 100644 index 00000000..586e381d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter.py new file mode 100644 index 00000000..2088a145 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a job. 
+ + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the job is left unchanged. + :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The pool on which the Batch service runs the job's + tasks. You may change the pool for a job only when the job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto pool has a poolLifetimeOption of job. If omitted, the job + continues to run on its current pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, the existing job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter_py3.py new file mode 100644 index 00000000..af6da2aa --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_patch_parameter_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a job. + + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the job is left unchanged. 
+ :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The pool on which the Batch service runs the job's + tasks. You may change the pool for a job only when the job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto pool has a poolLifetimeOption of job. If omitted, the job + continues to run on its current pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, the existing job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, priority: int=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None: + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = priority + self.on_all_tasks_complete = on_all_tasks_complete + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information.py new file mode 100644 index 00000000..44356460 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release tasks on a compute node. + + :param pool_id: The ID of the pool containing the compute node to which + this entry refers. 
+ :type pool_id: str + :param node_id: The ID of the compute node to which this entry refers. + :type node_id: str + :param node_url: The URL of the compute node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation task on this compute node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release task on this compute node. This property is set + only if the Job Release task has run on the node. + :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.node_url = kwargs.get('node_url', None) + self.job_preparation_task_execution_info = kwargs.get('job_preparation_task_execution_info', None) + self.job_release_task_execution_info = kwargs.get('job_release_task_execution_info', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_paged.py new file mode 100644 index 00000000..f1f7d3c4 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class JobPreparationAndReleaseTaskExecutionInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobPreparationAndReleaseTaskExecutionInformation ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobPreparationAndReleaseTaskExecutionInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(JobPreparationAndReleaseTaskExecutionInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_py3.py new file mode 100644 index 00000000..35520702 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_and_release_task_execution_information_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release tasks on a compute node. + + :param pool_id: The ID of the pool containing the compute node to which + this entry refers. + :type pool_id: str + :param node_id: The ID of the compute node to which this entry refers. + :type node_id: str + :param node_url: The URL of the compute node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation task on this compute node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release task on this compute node. This property is set + only if the Job Release task has run on the node. 
+ :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, *, pool_id: str=None, node_id: str=None, node_url: str=None, job_preparation_task_execution_info=None, job_release_task_execution_info=None, **kwargs) -> None: + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.node_id = node_id + self.node_url = node_url + self.job_preparation_task_execution_info = job_preparation_task_execution_info + self.job_release_task_execution_info = job_release_task_execution_info diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task.py new file mode 100644 index 00000000..acf8da03 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation task to run before any tasks of the job on any given + compute node. 
+ + You can use Job Preparation to prepare a compute node to run tasks for the + job. Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the tasks in the job. The Job Preparation + task can download these common resource files to the shared location on the + compute node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service + on the compute node so that all tasks of that job can communicate with it. + If the Job Preparation task fails (that is, exhausts its retry count before + exiting with exit code 0), Batch will not run tasks of this job on the + compute node. The node remains ineligible to run tasks of this job until it + is reimaged. The node remains active and can be used for other jobs. The + Job Preparation task can run multiple times on the same compute node. + Therefore, you should write the Job Preparation task to handle + re-execution. If the compute node is rebooted, the Job Preparation task is + run again on the node before scheduling any other task of the job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did + not previously complete. If the compute node is reimaged, the Job + Preparation task is run again before scheduling any task of the job. Batch + will retry tasks when a recovery operation is triggered on a compute node. + Examples of recovery operations include (but are not limited to) when an + unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. 
+ + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation task + within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other task in the job can + have the same ID as the Job Preparation task. If you try to submit a task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all task + environment variables are mapped into the container, and the task command + line is executed in the container. 
+ :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation task to complete successfully before scheduling any other + tasks of the job on the compute node. A Job Preparation task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + task fails on a compute node, the Batch service retries the Job + Preparation task up to its maximum retry count (as specified in the + constraints element). If the task has still not completed successfully + after all retries, then the Batch service will not schedule tasks of the + job to the compute node. The compute node remains active and eligible to + run tasks of other jobs. If false, the Batch service will not wait for the + Job Preparation task to complete. In this case, other tasks of the job can + start executing on the compute node while the Job Preparation task is + still running; and even if the Job Preparation task fails, new tasks will + continue to be scheduled on the node. 
The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + task runs. If omitted, the task runs as a non-administrative user unique + to the task on Windows nodes, or a non-administrative user unique to the + pool on Linux nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation task after a compute node reboots. The + Job Preparation task is always rerun if a compute node is reimaged, or if + the Job Preparation task did not complete (e.g. because the reboot + occurred while the task was running). Therefore, you should always write a + Job Preparation task to be idempotent and to behave correctly if run + multiple times. The default value is true. + :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', 
None) + self.constraints = kwargs.get('constraints', None) + self.wait_for_success = kwargs.get('wait_for_success', None) + self.user_identity = kwargs.get('user_identity', None) + self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information.py new file mode 100644 index 00000000..f51b95a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation task on a + compute node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Preparation task completed. + This property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation task on + the compute node. 
Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation task + on the compute node. You can use this path to retrieve files created by + the task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. 
+ :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation task started running. This property is set only if the task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the task has been restarted + for reasons other than retry; for example, if the compute node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTaskExecutionInformation,
self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information_py3.py new file mode 100644 index 00000000..36bd33f7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_execution_information_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation task on a + compute node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. 
+ :type start_time: datetime + :param end_time: The time at which the Job Preparation task completed. + This property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation task on + the compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation task + on the compute node. You can use this path to retrieve files created by + the task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. 
Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation task started running. This property is set only if the task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the task has been restarted + for reasons other than retry; for example, if the compute node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property.
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, retry_count: int, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_py3.py new file mode 100644 index 00000000..93e99d37 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/models/job_preparation_task_py3.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation task to run before any tasks of the job on any given + compute node. + + You can use Job Preparation to prepare a compute node to run tasks for the + job. Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the tasks in the job. The Job Preparation + task can download these common resource files to the shared location on the + compute node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service + on the compute node so that all tasks of that job can communicate with it. + If the Job Preparation task fails (that is, exhausts its retry count before + exiting with exit code 0), Batch will not run tasks of this job on the + compute node. The node remains ineligible to run tasks of this job until it + is reimaged. The node remains active and can be used for other jobs. The + Job Preparation task can run multiple times on the same compute node. + Therefore, you should write the Job Preparation task to handle + re-execution. If the compute node is rebooted, the Job Preparation task is + run again on the node before scheduling any other task of the job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation task did + not previously complete. 
If the compute node is reimaged, the Job + Preparation task is run again before scheduling any task of the job. Batch + will retry tasks when a recovery operation is triggered on a compute node. + Examples of recovery operations include (but are not limited to) when an + unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation task + within the job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other task in the job can + have the same ID as the Job Preparation task. If you try to submit a task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file + paths, it should use a relative path (relative to the task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all task + environment variables are mapped into the container, and the task command + line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. Files listed + under this element are located in the task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation task to complete successfully before scheduling any other + tasks of the job on the compute node. A Job Preparation task has completed + successfully if it exits with exit code 0. 
If true and the Job Preparation + task fails on a compute node, the Batch service retries the Job + Preparation task up to its maximum retry count (as specified in the + constraints element). If the task has still not completed successfully + after all retries, then the Batch service will not schedule tasks of the + job to the compute node. The compute node remains active and eligible to + run tasks of other jobs. If false, the Batch service will not wait for the + Job Preparation task to complete. In this case, other tasks of the job can + start executing on the compute node while the Job Preparation task is + still running; and even if the Job Preparation task fails, new tasks will + continue to be scheduled on the node. The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + task runs. If omitted, the task runs as a non-administrative user unique + to the task on Windows nodes, or a non-administrative user unique to the + pool on Linux nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation task after a compute node reboots. The + Job Preparation task is always rerun if a compute node is reimaged, or if + the Job Preparation task did not complete (e.g. because the reboot + occurred while the task was running). Therefore, you should always write a + Job Preparation task to be idempotent and to behave correctly if run + multiple times. The default value is true. 
+ :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None: + super(JobPreparationTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.constraints = constraints + self.wait_for_success = wait_for_success + self.user_identity = user_identity + self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_release_task.py b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task.py new file mode 100644 index 00000000..32b97e5c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release task to run on job completion on any compute node where the + job has run. + + The Job Release task runs when the job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the job is still active, the job's maximum wall clock time constraint + is reached, and the job is still active, or the job's Job Manager task + completed, and the job is configured to terminate when the Job Manager + completes. The Job Release task runs on each compute node where tasks of + the job have run and the Job Preparation task ran and completed. If you + reimage a compute node after it has run the Job Preparation task, and the + job ends without any further tasks of the job running on that compute node + (and hence the Job Preparation task does not re-run), then the Job Release + task does not run on that node. If a compute node reboots while the Job + Release task is still running, the Job Release task runs again when the + compute node starts up. The job is not marked as complete until all Job + Release tasks have completed. The Job Release task runs in the background. + It does not occupy a scheduling slot; that is, it does not count towards + the maxTasksPerNode limit specified on the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release task within + the job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. 
If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. No other task in the job can have the + same ID as the Job Release task. If you try to submit a task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all task environment variables + are mapped into the container, and the task command line is executed in + the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. 
This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + task may run on a given compute node, measured from the time the task + starts. If the task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory for + the Job Release task on the compute node. After this time, the Batch + service may delete the task directory and all its contents. The default is + 7 days, i.e. the task directory will be retained for 7 days unless the + compute node is removed or the job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.user_identity = kwargs.get('user_identity', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information.py new file mode 100644 index 00000000..0ccb4f64 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release task on a compute + node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release task completed. This + property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release task on the + compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release task on + the compute node. You can use this path to retrieve files created by the + task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. 
+ :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + 
self.failure_info = kwargs.get('failure_info', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information_py3.py new file mode 100644 index 00000000..ed08089b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_execution_information_py3.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release task on a compute + node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the task started running. + If the task has been restarted or retried, this is the most recent time at + which the task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release task completed. This + property is set only if the task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release task on the + compute node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release task on + the compute node. You can use this path to retrieve files created by the + task, such as log files. 
+ :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release task on the compute node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the task + command line. This parameter is returned only if the task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the compute node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, result=None, **kwargs) -> None: + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.result = result diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_py3.py new file mode 100644 index 00000000..a12fbc8f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_release_task_py3.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release task to run on job completion on any compute node where the + job has run. + + The Job Release task runs when the job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the job is still active, the job's maximum wall clock time constraint + is reached, and the job is still active, or the job's Job Manager task + completed, and the job is configured to terminate when the Job Manager + completes. The Job Release task runs on each compute node where tasks of + the job have run and the Job Preparation task ran and completed. If you + reimage a compute node after it has run the Job Preparation task, and the + job ends without any further tasks of the job running on that compute node + (and hence the Job Preparation task does not re-run), then the Job Release + task does not run on that node. If a compute node reboots while the Job + Release task is still running, the Job Release task runs again when the + compute node starts up. The job is not marked as complete until all Job + Release tasks have completed. The Job Release task runs in the background. + It does not occupy a scheduling slot; that is, it does not count towards + the maxTasksPerNode limit specified on the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release task within + the job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. 
No other task in the job can have the + same ID as the Job Release task. If you try to submit a task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all task environment variables + are mapped into the container, and the task command line is executed in + the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + task may run on a given compute node, measured from the time the task + starts. If the task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory for + the Job Release task on the compute node. After this time, the Batch + service may delete the task directory and all its contents. The default is + 7 days, i.e. the task directory will be retained for 7 days unless the + compute node is removed or the job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release task + runs. If omitted, the task runs as a non-administrative user unique to the + task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None: + super(JobReleaseTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.user_identity = user_identity diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options.py new file mode 100644 index 00000000..6c03aaff --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options_py3.py new file mode 100644 index 00000000..fe7b76cc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter.py new file mode 100644 index 00000000..e0d8d724 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the schedule within + the account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter_py3.py new file mode 100644 index 00000000..9281c765 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_add_parameter_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A job schedule that allows recurring jobs by specifying when to run jobs + and a specification used to create each job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the schedule within + the account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None: + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options.py new file mode 100644 index 00000000..a7e01118 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options_py3.py new file mode 100644 index 00000000..89ae9986 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options.py new file mode 100644 index 00000000..9384c1fb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options_py3.py new file mode 100644 index 00000000..83adbe53 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options.py new file mode 100644 index 00000000..a296d530 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options_py3.py new file mode 100644 index 00000000..daa4d087 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information.py new file mode 100644 index 00000000..b79a4e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about jobs that have been and will be run under a job + schedule. + + :param next_run_time: The next time at which a job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no job will be created at nextRunTime unless the job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent job under the job + schedule. This property is present only if the at least one job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the job schedule is in the completed state. + :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = kwargs.get('next_run_time', None) + self.recent_job = kwargs.get('recent_job', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information_py3.py new file mode 100644 index 00000000..6afcaa38 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_execution_information_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about jobs that have been and will be run under a job + schedule. + + :param next_run_time: The next time at which a job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no job will be created at nextRunTime unless the job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent job under the job + schedule. This property is present only if the at least one job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the job schedule is in the completed state. 
+ :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, next_run_time=None, recent_job=None, end_time=None, **kwargs) -> None: + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = next_run_time + self.recent_job = recent_job + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options.py new file mode 100644 index 00000000..c4f228d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options_py3.py new file mode 100644 index 00000000..da8e15d2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options.py new file mode 100644 index 00000000..434b0ab1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options_py3.py new file mode 100644 index 00000000..11ee540f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_list_options.py new file mode 100644 index 00000000..28af3945 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.

from msrest.serialization import Model


# ---- azext/generated/sdk/batch/v2018_12_01/models/job_schedule_list_options.py ----
class JobScheduleListOptions(Model):
    """Additional parameters for the job-schedule list operation.

    :param filter: An OData $filter clause. For more information on
     constructing this filter, see
     https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
    :type filter: str
    :param select: An OData $select clause.
    :type select: str
    :param expand: An OData $expand clause.
    :type expand: str
    :param max_results: Maximum number of items per response; at most 1000
     job schedules can be returned. Default value: 1000 .
    :type max_results: int
    :param timeout: Maximum server-side processing time, in seconds.
     Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity — a GUID
     with no decoration such as curly braces.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; set explicitly only
     when calling the REST API directly.
    :type ocp_date: datetime
    """

    _attribute_map = {
        'filter': {'key': '', 'type': 'str'},
        'select': {'key': '', 'type': 'str'},
        'expand': {'key': '', 'type': 'str'},
        'max_results': {'key': '', 'type': 'int'},
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleListOptions, self).__init__(**kwargs)
        # (name, default) pairs: any option absent from kwargs takes its
        # documented default.
        for name, default in (
                ('filter', None), ('select', None), ('expand', None),
                ('max_results', 1000), ('timeout', 30),
                ('client_request_id', None),
                ('return_client_request_id', False), ('ocp_date', None)):
            setattr(self, name, kwargs.get(name, default))


# ---- azext/generated/sdk/batch/v2018_12_01/models/job_schedule_list_options_py3.py ----
class JobScheduleListOptions(Model):
    """Additional parameters for the job-schedule list operation
    (Python 3 keyword-only variant).

    :param filter: An OData $filter clause. For more information on
     constructing this filter, see
     https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules.
    :type filter: str
    :param select: An OData $select clause.
    :type select: str
    :param expand: An OData $expand clause.
    :type expand: str
    :param max_results: Maximum number of items per response; at most 1000
     job schedules can be returned. Default value: 1000 .
    :type max_results: int
    :param timeout: Maximum server-side processing time, in seconds.
     Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity — a GUID
     with no decoration such as curly braces.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; set explicitly only
     when calling the REST API directly.
    :type ocp_date: datetime
    """

    _attribute_map = {
        'filter': {'key': '', 'type': 'str'},
        'select': {'key': '', 'type': 'str'},
        'expand': {'key': '', 'type': 'str'},
        'max_results': {'key': '', 'type': 'int'},
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None:
        super(JobScheduleListOptions, self).__init__(**kwargs)
        # _attribute_map keys mirror the parameter names exactly.
        args = locals()
        for name in self._attribute_map:
            setattr(self, name, args[name])


# ---- azext/generated/sdk/batch/v2018_12_01/models/job_schedule_patch_options.py ----
class JobSchedulePatchOptions(Model):
    """Additional parameters for the job-schedule patch operation.

    :param timeout: Maximum server-side processing time, in seconds.
     Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity — a GUID
     with no decoration such as curly braces.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; set explicitly only
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: Operate only if the resource's current ETag exactly
     matches this value.
    :type if_match: str
    :param if_none_match: Operate only if the resource's current ETag does
     NOT match this value.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource was modified
     after this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource was not
     modified after this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulePatchOptions, self).__init__(**kwargs)
        # Only timeout and return_client_request_id carry non-None defaults.
        for name, default in (
                ('timeout', 30), ('client_request_id', None),
                ('return_client_request_id', False), ('ocp_date', None),
                ('if_match', None), ('if_none_match', None),
                ('if_modified_since', None), ('if_unmodified_since', None)):
            setattr(self, name, kwargs.get(name, default))


# ---- azext/generated/sdk/batch/v2018_12_01/models/job_schedule_patch_options_py3.py ----
class JobSchedulePatchOptions(Model):
    """Additional parameters for the job-schedule patch operation
    (Python 3 keyword-only variant).

    :param timeout: Maximum server-side processing time, in seconds.
     Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity — a GUID
     with no decoration such as curly braces.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued; set explicitly only
     when calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: Operate only if the resource's current ETag exactly
     matches this value.
    :type if_match: str
    :param if_none_match: Operate only if the resource's current ETag does
     NOT match this value.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource was modified
     after this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource was not
     modified after this time.
    :type if_unmodified_since: datetime
    """

    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        super(JobSchedulePatchOptions, self).__init__(**kwargs)
        # _attribute_map keys mirror the parameter names exactly.
        args = locals()
        for name in self._attribute_map:
            setattr(self, name, args[name])
# ---- azext/generated/sdk/batch/v2018_12_01/models/job_schedule_patch_parameter.py ----
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.

from msrest.serialization import Model


class JobSchedulePatchParameter(Model):
    """The set of changes to be made to a job schedule.

    Any element left unset (None) leaves the corresponding server-side
    value unchanged.

    :param schedule: The schedule according to which jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: Details of the jobs to be created on this
     schedule. Updates affect only jobs started after the update; currently
     active jobs continue with the older specification.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param metadata: Name-value pairs associated with the job schedule as
     metadata.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulePatchParameter, self).__init__(**kwargs)
        # All three fields default to None (meaning "leave unchanged").
        for name in ('schedule', 'job_specification', 'metadata'):
            setattr(self, name, kwargs.get(name))
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchParameter(Model): + """The set of changes to be made to a job schedule. + + :param schedule: The schedule according to which jobs will be created. If + you do not specify this element, the existing schedule is left unchanged. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the jobs to be created on this + schedule. Updates affect only jobs that are started after the update has + taken place. Any currently active job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the job + schedule as metadata. If you do not specify this element, existing + metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None: + super(JobSchedulePatchParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics.py new file mode 100644 index 00000000..ebf2e3e2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a job schedule. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. 
The total user mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in all jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in all jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + tasks in all the jobs created under the schedule. The wall clock time is + the elapsed time from when the task started running on a compute node to + when it finished (or to the last time the statistics were updated, if the + task had not finished by then). If a task was retried, this includes the + wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all tasks in all jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all tasks in all jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + tasks in all jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + tasks in all jobs created under the schedule. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of tasks + successfully completed during the given time range in jobs created under + the schedule. A task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of tasks that failed + during the given time range in jobs created under the schedule. A task + fails if it exhausts its maximum retry count without returning exit code + 0. 
+ :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all tasks in all jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all tasks in all jobs + created under the schedule. The wait time for a task is defined as the + elapsed time between the creation of the task and the start of task + execution. (If the task is retried due to failures, the wait time is the + time to the most recent task execution.). This value is only reported in + the account lifetime statistics; it is not included in the job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, 
+ 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics_py3.py new file mode 100644 index 00000000..d335aa94 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_schedule_statistics_py3.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a job schedule. 
+ + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in all jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in all jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + tasks in all the jobs created under the schedule. The wall clock time is + the elapsed time from when the task started running on a compute node to + when it finished (or to the last time the statistics were updated, if the + task had not finished by then). If a task was retried, this includes the + wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all tasks in all jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all tasks in all jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + tasks in all jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + tasks in all jobs created under the schedule. 
+ :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of tasks + successfully completed during the given time range in jobs created under + the schedule. A task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of tasks that failed + during the given time range in jobs created under the schedule. A task + fails if it exhausts its maximum retry count without returning exit code + 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all tasks in all jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all tasks in all jobs + created under the schedule. The wait time for a task is defined as the + elapsed time between the creation of the task and the start of task + execution. (If the task is retried due to failures, the wait time is the + time to the most recent task execution.). This value is only reported in + the account lifetime statistics; it is not included in the job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + 
class JobScheduleTerminateOptions(Model):
    """Additional parameters for the job schedule terminate operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity: a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation runs only if
     the resource's current ETag on the service exactly matches it.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation runs only
     if the resource's current ETag on the service does NOT match it.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource on the service
     has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource on the service
     has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    # 'key' values are blank in the generated source (this model is a
    # parameter group, not a serialized body); preserved verbatim.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleTerminateOptions, self).__init__(**kwargs)
        # Pull every known option out of kwargs, falling back to the
        # service-side defaults documented above.
        option_defaults = (
            ('timeout', 30),
            ('client_request_id', None),
            ('return_client_request_id', False),
            ('ocp_date', None),
            ('if_match', None),
            ('if_none_match', None),
            ('if_modified_since', None),
            ('if_unmodified_since', None),
        )
        for option, default in option_defaults:
            setattr(self, option, kwargs.get(option, default))
class JobScheduleTerminateOptions(Model):
    """Additional parameters for the job schedule terminate operation
    (Python 3 variant with an explicit keyword-only signature).

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity: a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation runs only if
     the resource's current ETag on the service exactly matches it.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation runs only
     if the resource's current ETag on the service does NOT match it.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource on the service
     has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource on the service
     has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    # 'key' values are blank in the generated source (parameter group,
    # not a serialized body); preserved verbatim.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        # Python-3-only module, so the zero-argument super() form is safe.
        super().__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
class JobScheduleUpdateOptions(Model):
    """Additional parameters for the job schedule update operation.

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity: a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation runs only if
     the resource's current ETag on the service exactly matches it.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation runs only
     if the resource's current ETag on the service does NOT match it.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource on the service
     has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource on the service
     has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    # 'key' values are blank in the generated source (parameter group,
    # not a serialized body); preserved verbatim.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleUpdateOptions, self).__init__(**kwargs)
        # Pull every known option out of kwargs, falling back to the
        # service-side defaults documented above.
        option_defaults = (
            ('timeout', 30),
            ('client_request_id', None),
            ('return_client_request_id', False),
            ('ocp_date', None),
            ('if_match', None),
            ('if_none_match', None),
            ('if_modified_since', None),
            ('if_unmodified_since', None),
        )
        for option, default in option_defaults:
            setattr(self, option, kwargs.get(option, default))
class JobScheduleUpdateOptions(Model):
    """Additional parameters for the job schedule update operation
    (Python 3 variant with an explicit keyword-only signature).

    :param timeout: Maximum time the server may spend processing the
     request, in seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity: a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if you are calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: ETag known to the client; the operation runs only if
     the resource's current ETag on the service exactly matches it.
    :type if_match: str
    :param if_none_match: ETag known to the client; the operation runs only
     if the resource's current ETag on the service does NOT match it.
    :type if_none_match: str
    :param if_modified_since: Operate only if the resource on the service
     has been modified since this timestamp.
    :type if_modified_since: datetime
    :param if_unmodified_since: Operate only if the resource on the service
     has not been modified since this timestamp.
    :type if_unmodified_since: datetime
    """

    # 'key' values are blank in the generated source (parameter group,
    # not a serialized body); preserved verbatim.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None:
        # Python-3-only module, so the zero-argument super() form is safe.
        super().__init__(**kwargs)
        self.timeout = timeout
        self.client_request_id = client_request_id
        self.return_client_request_id = return_client_request_id
        self.ocp_date = ocp_date
        self.if_match = if_match
        self.if_none_match = if_none_match
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
class JobScheduleUpdateParameter(Model):
    """The set of changes to be made to a job schedule.

    All required parameters must be populated in order to send to Azure.

    :param schedule: Required. The schedule according to which jobs will be
     created. Omitting this element is equivalent to passing the default
     schedule: a single job scheduled to run immediately.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: Required. Details of the jobs to be created on
     this schedule. Updates affect only jobs started after the update has
     taken place; any currently active job continues with the older
     specification.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param metadata: Name-value pairs associated with the job schedule as
     metadata. Omitting this element takes the default of an empty list; in
     effect, any existing metadata is deleted.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'schedule': {'required': True},
        'job_specification': {'required': True},
    }

    _attribute_map = {
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(JobScheduleUpdateParameter, self).__init__(**kwargs)
        # kwargs.get() already defaults to None, so the explicit second
        # argument is unnecessary.
        self.schedule = kwargs.get('schedule')
        self.job_specification = kwargs.get('job_specification')
        self.metadata = kwargs.get('metadata')
class JobSchedulingError(Model):
    """An error encountered by the Batch service when scheduling a job.

    All required parameters must be populated in order to send to Azure.

    :param category: Required. The category of the job scheduling error.
     Possible values include: 'userError', 'serverError'
    :type category: str or ~azure.batch.models.ErrorCategory
    :param code: An identifier for the job scheduling error. Codes are
     invariant and are intended to be consumed programmatically.
    :type code: str
    :param message: A message describing the job scheduling error, intended
     to be suitable for display in a user interface.
    :type message: str
    :param details: A list of additional error details related to the
     scheduling error.
    :type details: list[~azure.batch.models.NameValuePair]
    """

    _validation = {
        'category': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'ErrorCategory'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[NameValuePair]'},
    }

    def __init__(self, **kwargs):
        super(JobSchedulingError, self).__init__(**kwargs)
        # kwargs.get() already defaults to None, so the explicit second
        # argument is unnecessary.
        self.category = kwargs.get('category')
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
        self.details = kwargs.get('details')
class JobSchedulingError(Model):
    """An error encountered by the Batch service when scheduling a job.

    All required parameters must be populated in order to send to Azure.

    :param category: Required. The category of the job scheduling error.
     Possible values include: 'userError', 'serverError'
    :type category: str or ~azure.batch.models.ErrorCategory
    :param code: An identifier for the job scheduling error. Codes are
     invariant and are intended to be consumed programmatically.
    :type code: str
    :param message: A message describing the job scheduling error, intended
     to be suitable for display in a user interface.
    :type message: str
    :param details: A list of additional error details related to the
     scheduling error.
    :type details: list[~azure.batch.models.NameValuePair]
    """

    _validation = {
        'category': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'ErrorCategory'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'details': {'key': 'details', 'type': '[NameValuePair]'},
    }

    def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None:
        super(JobSchedulingError, self).__init__(**kwargs)
        self.category = category
        self.code = code
        self.message = message
        self.details = details


class JobSpecification(Model):
    """Specifies details of the jobs to be created on a schedule.

    All required parameters must be populated in order to send to Azure.

    :param priority: The priority of jobs created under this schedule.
     Priority values can range from -1000 to 1000, with -1000 being the
     lowest priority and 1000 being the highest priority. The default value
     is 0. This priority is used as the default for all jobs under the job
     schedule. You can update a job's priority after it has been created by
     using the update job API.
    :type priority: int
    :param display_name: The display name for jobs created under this
     schedule. The name need not be unique and can contain any Unicode
     characters up to a maximum length of 1024.
    :type display_name: str
    :param uses_task_dependencies: Whether tasks in the job can define
     dependencies on each other. The default is false.
    :type uses_task_dependencies: bool
    :param on_all_tasks_complete: The action the Batch service should take
     when all tasks in a job created under this schedule are in the
     completed state. Note that if a job contains no tasks, then all tasks
     are considered complete. This option is therefore most commonly used
     with a Job Manager task; if you want to use automatic job termination
     without a Job Manager, you should initially set onAllTasksComplete to
     noaction and update the job properties to set onAllTasksComplete to
     terminatejob once you have finished adding tasks. The default is
     noaction. Possible values include: 'noAction', 'terminateJob'
    :type on_all_tasks_complete: str or
     ~azure.batch.models.OnAllTasksComplete
    :param on_task_failure: The action the Batch service should take when
     any task fails in a job created under this schedule. A task is
     considered to have failed if it has a failureInfo. A failureInfo is set
     if the task completes with a non-zero exit code after exhausting its
     retry count, or if there was an error starting the task, for example
     due to a resource file download error. The default is noaction.
     Possible values include: 'noAction', 'performExitOptionsJobAction'
    :type on_task_failure: str or ~azure.batch.models.OnTaskFailure
    :param network_configuration: The network configuration for the job.
    :type network_configuration: ~azure.batch.models.JobNetworkConfiguration
    :param constraints: The execution constraints for jobs created under
     this schedule.
    :type constraints: ~azure.batch.models.JobConstraints
    :param job_manager_task: The details of a Job Manager task to be
     launched when a job is started under this schedule. If the job does not
     specify a Job Manager task, the user must explicitly add tasks to the
     job using the Task API. If the job does specify a Job Manager task, the
     Batch service creates the Job Manager task when the job is created, and
     will try to schedule the Job Manager task before scheduling other tasks
     in the job.
    :type job_manager_task: ~azure.batch.models.JobManagerTask
    :param job_preparation_task: The Job Preparation task for jobs created
     under this schedule. If a job has a Job Preparation task, the Batch
     service will run the Job Preparation task on a compute node before
     starting any tasks of that job on that compute node.
    :type job_preparation_task: ~azure.batch.models.JobPreparationTask
    :param job_release_task: The Job Release task for jobs created under
     this schedule. The primary purpose of the Job Release task is to undo
     changes to compute nodes made by the Job Preparation task. Example
     activities include deleting local files, or shutting down services that
     were started as part of job preparation. A Job Release task cannot be
     specified without also specifying a Job Preparation task for the job.
     The Batch service runs the Job Release task on the compute nodes that
     have run the Job Preparation task.
    :type job_release_task: ~azure.batch.models.JobReleaseTask
    :param common_environment_settings: A list of common environment
     variable settings. These environment variables are set for all tasks in
     jobs created under this schedule (including the Job Manager, Job
     Preparation and Job Release tasks). Individual tasks can override an
     environment setting specified here by specifying the same setting name
     with a different value.
    :type common_environment_settings:
     list[~azure.batch.models.EnvironmentSetting]
    :param pool_info: Required. The pool on which the Batch service runs the
     tasks of jobs created under this schedule.
    :type pool_info: ~azure.batch.models.PoolInformation
    :param metadata: A list of name-value pairs associated with each job
     created under this schedule as metadata. The Batch service does not
     assign any meaning to metadata; it is solely for the use of user code.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    _validation = {
        'pool_info': {'required': True},
    }

    _attribute_map = {
        'priority': {'key': 'priority', 'type': 'int'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
        'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
        'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'},
        'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
        'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
        'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
        'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
        'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
        'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(JobSpecification, self).__init__(**kwargs)
        self.priority = kwargs.get('priority', None)
        self.display_name = kwargs.get('display_name', None)
        self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None)
        self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None)
        self.on_task_failure = kwargs.get('on_task_failure', None)
        self.network_configuration = kwargs.get('network_configuration', None)
        self.constraints = kwargs.get('constraints', None)
        self.job_manager_task = kwargs.get('job_manager_task', None)
        self.job_preparation_task = kwargs.get('job_preparation_task', None)
        self.job_release_task = kwargs.get('job_release_task', None)
        self.common_environment_settings = kwargs.get('common_environment_settings', None)
        self.pool_info = kwargs.get('pool_info', None)
        self.metadata = kwargs.get('metadata', None)
+ :type display_name: str + :param uses_task_dependencies: Whether tasks in the job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in a job created under this schedule are in the completed + state. Note that if a job contains no tasks, then all tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the job + properties to set onAllTasksComplete to terminatejob once you have + finished adding tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + task fails in a job created under this schedule. A task is considered to + have failed if it has a failureInfo. A failureInfo is set + if the task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param constraints: The execution constraints for jobs created under this + schedule. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager task to be launched + when a job is started under this schedule. If the job does not specify a + Job Manager task, the user must explicitly add tasks to the job using the + Task API.
If the job does specify a Job Manager task, the Batch service + creates the Job Manager task when the job is created, and will try to + schedule the Job Manager task before scheduling other tasks in the job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation task for jobs created + under this schedule. If a job has a Job Preparation task, the Batch + service will run the Job Preparation task on a compute node before + starting any tasks of that job on that compute node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release task for jobs created under this + schedule. The primary purpose of the Job Release task is to undo changes + to compute nodes made by the Job Preparation task. Example activities + include deleting local files, or shutting down services that were started + as part of job preparation. A Job Release task cannot be specified without + also specifying a Job Preparation task for the job. The Batch service runs + the Job Release task on the compute nodes that have run the Job + Preparation task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all tasks in jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release tasks). Individual tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The pool on which the Batch service runs the + tasks of jobs created under this schedule. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each job + created under this schedule as metadata. 
The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: + super(JobSpecification, self).__init__(**kwargs) + self.priority = priority + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.network_configuration = network_configuration + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = 
job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/job_statistics.py new file mode 100644 index 00000000..ca95a31d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_statistics.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in the job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in the job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all tasks + in the job. 
The wall clock time is the elapsed time from when the task + started running on a compute node to when it finished (or to the last time + the statistics were updated, if the task had not finished by then). If a + task was retried, this includes the wall clock time of all the task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all tasks in the job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all tasks in the job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all tasks in the job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all tasks in the job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of tasks + successfully completed in the job during the given time range. A task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of tasks in the job + that failed during the given time range. A task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + tasks in the job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all tasks in the job. + The wait time for a task is defined as the elapsed time between the + creation of the task and the start of task execution. (If the task is + retried due to failures, the wait time is the time to the most recent task + execution.) This value is only reported in the account lifetime + statistics; it is not included in the job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + 
self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_statistics_py3.py new file mode 100644 index 00000000..2f55b15a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_statistics_py3.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in the job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. 
The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by all tasks in the job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all tasks + in the job. The wall clock time is the elapsed time from when the task + started running on a compute node to when it finished (or to the last time + the statistics were updated, if the task had not finished by then). If a + task was retried, this includes the wall clock time of all the task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all tasks in the job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all tasks in the job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all tasks in the job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all tasks in the job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of tasks + successfully completed in the job during the given time range. A task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of tasks in the job + that failed during the given time range. A task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + tasks in the job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all tasks in the job. + The wait time for a task is defined as the elapsed time between the + creation of the task and the start of task execution. 
(If the task is + retried due to failures, the wait time is the time to the most recent task + execution.) This value is only reported in the account lifetime + statistics; it is not included in the job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobStatistics, 
self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options.py new file mode 100644 index 00000000..b858c404 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options_py3.py new file mode 100644 index 00000000..77173bcc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter.py new file mode 100644 index 00000000..4be6eaac --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a job. 
+ + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter_py3.py new file mode 100644 index 00000000..4a496555 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_terminate_parameter_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a job. + + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. 
+ :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, terminate_reason: str=None, **kwargs) -> None: + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_update_options.py b/azext/generated/sdk/batch/v2018_12_01/models/job_update_options.py new file mode 100644 index 00000000..a11f18ab --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_update_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_update_options_py3.py new file mode 100644 index 00000000..61a47c21 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter.py new file mode 100644 index 00000000..35f83063 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a job. 
+ + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. You may change the pool for a job only when the job is + disabled. The Update Job call will fail if you include the poolInfo + element and the job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto pool has a + poolLifetimeOption of job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, **kwargs): + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter_py3.py new file mode 100644 index 00000000..9dce5ea1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/job_update_parameter_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a job. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the job. 
Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The pool on which the Batch service runs the + job's tasks. You may change the pool for a job only when the job is + disabled. The Update Job call will fail if you include the poolInfo + element and the job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto pool has a + poolLifetimeOption of job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all tasks in the job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, *, pool_info, priority: int=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None: + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = priority + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata + self.on_all_tasks_complete = on_all_tasks_complete diff --git a/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration.py new file mode 100644 index 00000000..6ba12182 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user account on a Linux node. + + :param uid: The user ID of the user account. The uid and gid properties + must be specified together or not at all. 
If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between nodes in a Linux pool when the pool's enableInterNodeCommunication + property is true (it is ignored if enableInterNodeCommunication is false). + It does this by placing the key pair into the user's .ssh directory. If + not specified, password-less SSH is not configured between nodes (no + modification of the user's .ssh directory is done). + :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = kwargs.get('uid', None) + self.gid = kwargs.get('gid', None) + self.ssh_private_key = kwargs.get('ssh_private_key', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration_py3.py new file mode 100644 index 00000000..cb35b4c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/linux_user_configuration_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user account on a Linux node. + + :param uid: The user ID of the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between nodes in a Linux pool when the pool's enableInterNodeCommunication + property is true (it is ignored if enableInterNodeCommunication is false). + It does this by placing the key pair into the user's .ssh directory. If + not specified, password-less SSH is not configured between nodes (no + modification of the user's .ssh directory is done). 
+ :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = uid + self.gid = gid + self.ssh_private_key = ssh_private_key diff --git a/azext/generated/sdk/batch/v2018_12_01/models/metadata_item.py b/azext/generated/sdk/batch/v2018_12_01/models/metadata_item.py new file mode 100644 index 00000000..d1d203e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/metadata_item.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(MetadataItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/metadata_item_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/metadata_item_py3.py new file mode 100644 index 00000000..3d127cd1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/metadata_item_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str, **kwargs) -> None: + super(MetadataItem, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings.py b/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings.py new file mode 100644 index 00000000..0271b6ed --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance task. + + Multi-instance tasks are commonly used to support MPI tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance task fails. The + multi-instance task is then terminated and retried, up to its retry limit. + + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of compute nodes required by the + task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. 
The command line to run on all + the compute nodes to enable them to coordinate when the primary runs the + main task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the task working + directory, but instead are downloaded to the task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, **kwargs): + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = kwargs.get('number_of_instances', None) + self.coordination_command_line = kwargs.get('coordination_command_line', None) + self.common_resource_files = kwargs.get('common_resource_files', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings_py3.py new file mode 100644 index 00000000..1cbbfa9c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/multi_instance_settings_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance task. + + Multi-instance tasks are commonly used to support MPI tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance task fails. The + multi-instance task is then terminated and retried, up to its retry limit. 
+ + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of compute nodes required by the + task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the compute nodes to enable them to coordinate when the primary runs the + main task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the task working + directory, but instead are downloaded to the task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None: + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = number_of_instances + self.coordination_command_line = coordination_command_line + self.common_resource_files = common_resource_files diff --git a/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair.py b/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair.py new file mode 100644 index 00000000..d2775a33 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NameValuePair, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair_py3.py new file mode 100644 index 00000000..9e508e56 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/name_value_pair_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None: + super(NameValuePair, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/network_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/network_configuration.py new file mode 100644 index 00000000..f3f28019 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/network_configuration.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the compute nodes of the pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes in the pool. If the subnet + doesn't have enough free IP addresses, the pool will partially allocate + compute nodes, and a resize error will occur. 
For pools created with + virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported, but for pools created + with cloudServiceConfiguration both ARM and classic virtual networks are + supported. For more details, see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on compute + nodes in the Batch pool. Pool endpoint configuration is only supported on + pools with the virtualMachineConfiguration property. + :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, **kwargs): + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) + self.dynamic_vnet_assignment_scope = kwargs.get('dynamic_vnet_assignment_scope', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/network_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/network_configuration_py3.py new file mode 100644 index 00000000..3af5d030 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/network_configuration_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the compute nodes of the pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes in the pool. If the subnet + doesn't have enough free IP addresses, the pool will partially allocate + compute nodes, and a resize error will occur. For pools created with + virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported, but for pools created + with cloudServiceConfiguration both ARM and classic virtual networks are + supported. For more details, see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on compute + nodes in the Batch pool. Pool endpoint configuration is only supported on + pools with the virtualMachineConfiguration property. 
+ :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, **kwargs) -> None: + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id + self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope + self.endpoint_configuration = endpoint_configuration diff --git a/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule.py b/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule.py new file mode 100644 index 00000000..569693bf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. 
For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.access = kwargs.get('access', None) + self.source_address_prefix = kwargs.get('source_address_prefix', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule_py3.py new file mode 100644 index 00000000..9fec92ba --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/network_security_group_rule_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. 
+ :type source_address_prefix: str + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + } + + def __init__(self, *, priority: int, access, source_address_prefix: str, **kwargs) -> None: + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = priority + self.access = access + self.source_address_prefix = source_address_prefix diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information.py b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information.py new file mode 100644 index 00000000..0d61a707 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the node agent. + + The Batch node agent is a program that runs on each node in the pool and + provides Batch capability on the compute node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of the Batch node agent running on + the compute node. 
This version number can be checked against the node + agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the node agent was + updated on the compute node. This is the most recent time that the node + agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.last_update_time = kwargs.get('last_update_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information_py3.py new file mode 100644 index 00000000..770e3ca5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the node agent. + + The Batch node agent is a program that runs on each node in the pool and + provides Batch capability on the compute node. + + All required parameters must be populated in order to send to Azure. 
+ + :param version: Required. The version of the Batch node agent running on + the compute node. This version number can be checked against the node + agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the node agent was + updated on the compute node. This is the most recent time that the node + agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, version: str, last_update_time, **kwargs) -> None: + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = version + self.last_update_time = last_update_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku.py b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku.py new file mode 100644 index 00000000..dac567dd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentSku(Model): + """A node agent SKU supported by the Batch service. 
+ + The Batch node agent is a program that runs on each node in the pool, and + provides the command-and-control interface between the node and the Batch + service. There are different implementations of the node agent, known as + SKUs, for different operating systems. + + :param id: The ID of the node agent SKU. + :type id: str + :param verified_image_references: The list of Azure Marketplace images + verified to be compatible with this node agent SKU. This collection is not + exhaustive (the node agent may be compatible with other images). + :type verified_image_references: list[~azure.batch.models.ImageReference] + :param os_type: The type of operating system (e.g. Windows or Linux) + compatible with the node agent SKU. Possible values include: 'linux', + 'windows' + :type os_type: str or ~azure.batch.models.OSType + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'verified_image_references': {'key': 'verifiedImageReferences', 'type': '[ImageReference]'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + } + + def __init__(self, **kwargs): + super(NodeAgentSku, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.verified_image_references = kwargs.get('verified_image_references', None) + self.os_type = kwargs.get('os_type', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_paged.py new file mode 100644 index 00000000..020e753c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeAgentSkuPaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeAgentSku <azure.batch.models.NodeAgentSku>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeAgentSku]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeAgentSkuPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_py3.py new file mode 100644 index 00000000..29475f40 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_agent_sku_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentSku(Model): + """A node agent SKU supported by the Batch service. + + The Batch node agent is a program that runs on each node in the pool, and + provides the command-and-control interface between the node and the Batch + service. There are different implementations of the node agent, known as + SKUs, for different operating systems. + + :param id: The ID of the node agent SKU. + :type id: str + :param verified_image_references: The list of Azure Marketplace images + verified to be compatible with this node agent SKU. This collection is not + exhaustive (the node agent may be compatible with other images).
+ :type verified_image_references: list[~azure.batch.models.ImageReference] + :param os_type: The type of operating system (e.g. Windows or Linux) + compatible with the node agent SKU. Possible values include: 'linux', + 'windows' + :type os_type: str or ~azure.batch.models.OSType + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'verified_image_references': {'key': 'verifiedImageReferences', 'type': '[ImageReference]'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + } + + def __init__(self, *, id: str=None, verified_image_references=None, os_type=None, **kwargs) -> None: + super(NodeAgentSku, self).__init__(**kwargs) + self.id = id + self.verified_image_references = verified_image_references + self.os_type = os_type diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_counts.py b/azext/generated/sdk/batch/v2018_12_01/models/node_counts.py new file mode 100644 index 00000000..de54c0c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_counts.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of nodes in each node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of nodes in the creating state. + :type creating: int + :param idle: Required. The number of nodes in the idle state. + :type idle: int + :param offline: Required. The number of nodes in the offline state. 
+ :type offline: int + :param preempted: Required. The number of nodes in the preempted state. + :type preempted: int + :param rebooting: Required. The count of nodes in the rebooting state. + :type rebooting: int + :param reimaging: Required. The number of nodes in the reimaging state. + :type reimaging: int + :param running: Required. The number of nodes in the running state. + :type running: int + :param starting: Required. The number of nodes in the starting state. + :type starting: int + :param start_task_failed: Required. The number of nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of nodes in the leavingPool + state. + :type leaving_pool: int + :param unknown: Required. The number of nodes in the unknown state. + :type unknown: int + :param unusable: Required. The number of nodes in the unusable state. + :type unusable: int + :param waiting_for_start_task: Required. The number of nodes in the + waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of nodes. 
+ :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(NodeCounts, self).__init__(**kwargs) + self.creating = kwargs.get('creating', None) + self.idle = kwargs.get('idle', None) + self.offline = kwargs.get('offline', None) + self.preempted = kwargs.get('preempted', None) + self.rebooting = kwargs.get('rebooting', None) + self.reimaging = kwargs.get('reimaging', None) + self.running = kwargs.get('running', None) + self.starting = kwargs.get('starting', None) + self.start_task_failed = kwargs.get('start_task_failed', None) + self.leaving_pool = kwargs.get('leaving_pool', None) + self.unknown = kwargs.get('unknown', None) + self.unusable = kwargs.get('unusable', None) + self.waiting_for_start_task = 
kwargs.get('waiting_for_start_task', None) + self.total = kwargs.get('total', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_counts_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_counts_py3.py new file mode 100644 index 00000000..bfeca712 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_counts_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of nodes in each node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of nodes in the creating state. + :type creating: int + :param idle: Required. The number of nodes in the idle state. + :type idle: int + :param offline: Required. The number of nodes in the offline state. + :type offline: int + :param preempted: Required. The number of nodes in the preempted state. + :type preempted: int + :param rebooting: Required. The count of nodes in the rebooting state. + :type rebooting: int + :param reimaging: Required. The number of nodes in the reimaging state. + :type reimaging: int + :param running: Required. The number of nodes in the running state. + :type running: int + :param starting: Required. The number of nodes in the starting state. + :type starting: int + :param start_task_failed: Required. The number of nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of nodes in the leavingPool + state. 
+ :type leaving_pool: int + :param unknown: Required. The number of nodes in the unknown state. + :type unknown: int + :param unusable: Required. The number of nodes in the unusable state. + :type unusable: int + :param waiting_for_start_task: Required. The number of nodes in the + waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of nodes. + :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, *, creating: int, idle: int, offline: int, preempted: int, rebooting: int, reimaging: int, running: int, starting: int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, total: int, **kwargs) -> None: + super(NodeCounts, 
self).__init__(**kwargs) + self.creating = creating + self.idle = idle + self.offline = offline + self.preempted = preempted + self.rebooting = rebooting + self.reimaging = reimaging + self.running = running + self.starting = starting + self.start_task_failed = start_task_failed + self.leaving_pool = leaving_pool + self.unknown = unknown + self.unusable = unusable + self.waiting_for_start_task = waiting_for_start_task + self.total = total diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter.py new file mode 100644 index 00000000..e92b0262 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a compute node. + + :param node_disable_scheduling_option: What to do with currently running + tasks when disabling task scheduling on the compute node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, **kwargs): + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = kwargs.get('node_disable_scheduling_option', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter_py3.py new file mode 100644 index 00000000..d6de68c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_disable_scheduling_parameter_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a compute node. + + :param node_disable_scheduling_option: What to do with currently running + tasks when disabling task scheduling on the compute node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None: + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = node_disable_scheduling_option diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_file.py b/azext/generated/sdk/batch/v2018_12_01/models/node_file.py new file mode 100644 index 00000000..93fa29d6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_file.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a compute node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, **kwargs): + super(NodeFile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.url = kwargs.get('url', None) + self.is_directory = kwargs.get('is_directory', None) + self.properties = kwargs.get('properties', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_file_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/node_file_paged.py new file mode 100644 index 00000000..4463c944 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_file_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeFilePaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeFile <azure.batch.models.NodeFile>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeFile]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeFilePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_file_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_file_py3.py new file mode 100644 index 00000000..410f310d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_file_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a compute node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties.
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None: + super(NodeFile, self).__init__(**kwargs) + self.name = name + self.url = url + self.is_directory = is_directory + self.properties = properties diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter.py new file mode 100644 index 00000000..10e13ad7 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a compute node. + + :param node_reboot_option: When to reboot the compute node and what to do + with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, **kwargs): + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = kwargs.get('node_reboot_option', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter_py3.py new file mode 100644 index 00000000..0c21c6d1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_reboot_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a compute node. + + :param node_reboot_option: When to reboot the compute node and what to do + with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, *, node_reboot_option=None, **kwargs) -> None: + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = node_reboot_option diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter.py new file mode 100644 index 00000000..aa51f141 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a compute node. + + :param node_reimage_option: When to reimage the compute node and what to + do with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, **kwargs): + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = kwargs.get('node_reimage_option', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter_py3.py new file mode 100644 index 00000000..7af39305 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_reimage_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a compute node. + + :param node_reimage_option: When to reimage the compute node and what to + do with currently running tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, *, node_reimage_option=None, **kwargs) -> None: + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = node_reimage_option diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter.py new file mode 100644 index 00000000..f997671b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing compute nodes from a pool. + + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the compute nodes + to be removed from the specified pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of compute nodes to the + pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). 
+ :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) after it has been selected for deallocation. The default + value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = kwargs.get('node_list', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter_py3.py new file mode 100644 index 00000000..b9dbbc4e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_remove_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing compute nodes from a pool. 
+ + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the compute nodes + to be removed from the specified pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of compute nodes to the + pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) after it has been selected for deallocation. The default + value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = node_list + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter.py new file mode 100644 index 00000000..02df471c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user account on a node. + + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. If omitted, any existing password is removed. + :type password: str + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). If omitted, any + existing SSH public key is removed. 
+ :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = kwargs.get('password', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter_py3.py new file mode 100644 index 00000000..3ff93927 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/node_update_user_parameter_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user account on a node. + + :param password: The password of the account. The password is required for + Windows nodes (those created with 'cloudServiceConfiguration', or created + with 'virtualMachineConfiguration' using a Windows image reference). For + Linux compute nodes, the password can optionally be specified along with + the sshPublicKey property. If omitted, any existing password is removed. + :type password: str + :param expiry_time: The time at which the account should expire. If + omitted, the default is 1 day from the current time. 
For Linux compute + nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the compute node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux nodes. If this is specified for a Windows node, + then the Batch service rejects the request; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). If omitted, any + existing SSH public key is removed. + :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None: + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = password + self.expiry_time = expiry_time + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file.py new file mode 100644 index 00000000..b16fa592 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch node to another + location after the Batch service has finished executing the task process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, **kwargs): + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = kwargs.get('file_pattern', None) + self.destination = kwargs.get('destination', None) + self.upload_options = kwargs.get('upload_options', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination.py new file mode 100644 index 00000000..ee86a589 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. 
contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. + :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = kwargs.get('path', None) + self.container_url = kwargs.get('container_url', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination_py3.py new file mode 100644 index 00000000..3f0c9ce0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_blob_container_destination_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. 
+ :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, *, container_url: str, path: str=None, **kwargs) -> None: + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = path + self.container_url = container_url diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination.py new file mode 100644 index 00000000..1033743c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, **kwargs): + super(OutputFileDestination, self).__init__(**kwargs) + self.container = kwargs.get('container', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination_py3.py new file mode 100644 index 00000000..e7c652b6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_destination_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, *, container=None, **kwargs) -> None: + super(OutputFileDestination, self).__init__(**kwargs) + self.container = container diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_py3.py new file mode 100644 index 00000000..fee0d502 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch node to another + location after the Batch service has finished executing the task process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. 
Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None: + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = file_pattern + self.destination = destination + self.upload_options = upload_options diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options.py new file mode 100644 index 00000000..c626a355 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, **kwargs): + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = kwargs.get('upload_condition', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options_py3.py new file mode 100644 index 00000000..628d8794 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/output_file_upload_options_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the task + output file or set of files should be uploaded. The default is + taskcompletion. 
class PoolAddOptions(Model):
    """Additional parameters for the pool add operation.

    :param timeout: Maximum server-side processing time for the request, in
     seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    """

    # Empty 'key' values: these are operation-level options rather than
    # request-body fields — NOTE(review): presumably serialized as
    # query/header parameters by the generated operation method, not by
    # this model; confirm against the generated operations code.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(PoolAddOptions, self).__init__(**kwargs)
        # Defaults mirror the documented service defaults above.
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')
class PoolAddParameter(Model):
    """A pool in the Azure Batch service to add.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A string that uniquely identifies the pool within
     the account. Up to 64 alphanumeric characters, hyphens and underscores;
     case-preserving and case-insensitive.
    :type id: str
    :param display_name: Display name for the pool; need not be unique, up
     to 1024 Unicode characters.
    :type display_name: str
    :param vm_size: Required. The size of the virtual machines in the pool.
     All virtual machines in a pool are the same size.
    :type vm_size: str
    :param cloud_service_configuration: Cloud service configuration for the
     pool. Mutually exclusive with virtualMachineConfiguration; exactly one
     of the two must be specified.
    :type cloud_service_configuration:
     ~azure.batch.models.CloudServiceConfiguration
    :param virtual_machine_configuration: Virtual machine configuration for
     the pool. Mutually exclusive with cloudServiceConfiguration.
    :type virtual_machine_configuration:
     ~azure.batch.models.VirtualMachineConfiguration
    :param resize_timeout: Timeout for allocation of compute nodes to the
     pool (manual scaling only). Default 15 minutes; minimum 5 minutes.
    :type resize_timeout: timedelta
    :param target_dedicated_nodes: Desired number of dedicated compute
     nodes. Must not be specified when enableAutoScale is true.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: Desired number of low-priority compute
     nodes. Must not be specified when enableAutoScale is true.
    :type target_low_priority_nodes: int
    :param enable_auto_scale: Whether the pool size should automatically
     adjust over time. Default false.
    :type enable_auto_scale: bool
    :param auto_scale_formula: Formula for the desired number of compute
     nodes. Required when enableAutoScale is true; must not be specified
     otherwise.
    :type auto_scale_formula: str
    :param auto_scale_evaluation_interval: Interval at which the pool is
     automatically resized per the autoscale formula. Default 15 minutes;
     allowed range 5 minutes to 168 hours.
    :type auto_scale_evaluation_interval: timedelta
    :param enable_inter_node_communication: Whether the pool permits direct
     communication between nodes. Default false.
    :type enable_inter_node_communication: bool
    :param network_configuration: Network configuration for the pool.
    :type network_configuration: ~azure.batch.models.NetworkConfiguration
    :param start_task: Task to run on each compute node as it joins the
     pool, and whenever the node is restarted.
    :type start_task: ~azure.batch.models.StartTask
    :param certificate_references: Certificates to be installed on each
     compute node in the pool.
    :type certificate_references:
     list[~azure.batch.models.CertificateReference]
    :param application_package_references: Application packages to be
     installed on each compute node; maximum of 10 per pool.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param application_licenses: Application licenses the Batch service
     will make available on each compute node.
    :type application_licenses: list[str]
    :param max_tasks_per_node: Maximum number of tasks that can run
     concurrently on a single compute node. Default 1.
    :type max_tasks_per_node: int
    :param task_scheduling_policy: How tasks are distributed across compute
     nodes in the pool. Default is spread.
    :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
    :param user_accounts: User accounts to be created on each node in the
     pool.
    :type user_accounts: list[~azure.batch.models.UserAccount]
    :param metadata: Name-value pairs associated with the pool as metadata;
     not interpreted by the Batch service.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    # Only 'id' and 'vm_size' are mandatory; everything else is optional.
    _validation = {
        'id': {'required': True},
        'vm_size': {'required': True},
    }

    # Wire-name / msrest-type mapping for serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, **kwargs):
        super(PoolAddParameter, self).__init__(**kwargs)
        # Every attribute defaults to None when the keyword is absent;
        # required fields are enforced by _validation at serialization time.
        self.id = kwargs.get('id')
        self.display_name = kwargs.get('display_name')
        self.vm_size = kwargs.get('vm_size')
        self.cloud_service_configuration = kwargs.get('cloud_service_configuration')
        self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration')
        self.resize_timeout = kwargs.get('resize_timeout')
        self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes')
        self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes')
        self.enable_auto_scale = kwargs.get('enable_auto_scale')
        self.auto_scale_formula = kwargs.get('auto_scale_formula')
        self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval')
        self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication')
        self.network_configuration = kwargs.get('network_configuration')
        self.start_task = kwargs.get('start_task')
        self.certificate_references = kwargs.get('certificate_references')
        self.application_package_references = kwargs.get('application_package_references')
        self.application_licenses = kwargs.get('application_licenses')
        self.max_tasks_per_node = kwargs.get('max_tasks_per_node')
        self.task_scheduling_policy = kwargs.get('task_scheduling_policy')
        self.user_accounts = kwargs.get('user_accounts')
        self.metadata = kwargs.get('metadata')
class PoolAddParameter(Model):
    """A pool in the Azure Batch service to add (Python 3 variant with
    keyword-only constructor arguments).

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A string that uniquely identifies the pool within
     the account. Up to 64 alphanumeric characters, hyphens and underscores;
     case-preserving and case-insensitive.
    :type id: str
    :param display_name: Display name for the pool; need not be unique, up
     to 1024 Unicode characters.
    :type display_name: str
    :param vm_size: Required. The size of the virtual machines in the pool.
     All virtual machines in a pool are the same size.
    :type vm_size: str
    :param cloud_service_configuration: Cloud service configuration for the
     pool. Mutually exclusive with virtualMachineConfiguration; exactly one
     of the two must be specified.
    :type cloud_service_configuration:
     ~azure.batch.models.CloudServiceConfiguration
    :param virtual_machine_configuration: Virtual machine configuration for
     the pool. Mutually exclusive with cloudServiceConfiguration.
    :type virtual_machine_configuration:
     ~azure.batch.models.VirtualMachineConfiguration
    :param resize_timeout: Timeout for allocation of compute nodes to the
     pool (manual scaling only). Default 15 minutes; minimum 5 minutes.
    :type resize_timeout: timedelta
    :param target_dedicated_nodes: Desired number of dedicated compute
     nodes. Must not be specified when enableAutoScale is true.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: Desired number of low-priority compute
     nodes. Must not be specified when enableAutoScale is true.
    :type target_low_priority_nodes: int
    :param enable_auto_scale: Whether the pool size should automatically
     adjust over time. Default false.
    :type enable_auto_scale: bool
    :param auto_scale_formula: Formula for the desired number of compute
     nodes. Required when enableAutoScale is true; must not be specified
     otherwise.
    :type auto_scale_formula: str
    :param auto_scale_evaluation_interval: Interval at which the pool is
     automatically resized per the autoscale formula. Default 15 minutes;
     allowed range 5 minutes to 168 hours.
    :type auto_scale_evaluation_interval: timedelta
    :param enable_inter_node_communication: Whether the pool permits direct
     communication between nodes. Default false.
    :type enable_inter_node_communication: bool
    :param network_configuration: Network configuration for the pool.
    :type network_configuration: ~azure.batch.models.NetworkConfiguration
    :param start_task: Task to run on each compute node as it joins the
     pool, and whenever the node is restarted.
    :type start_task: ~azure.batch.models.StartTask
    :param certificate_references: Certificates to be installed on each
     compute node in the pool.
    :type certificate_references:
     list[~azure.batch.models.CertificateReference]
    :param application_package_references: Application packages to be
     installed on each compute node; maximum of 10 per pool.
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param application_licenses: Application licenses the Batch service
     will make available on each compute node.
    :type application_licenses: list[str]
    :param max_tasks_per_node: Maximum number of tasks that can run
     concurrently on a single compute node. Default 1.
    :type max_tasks_per_node: int
    :param task_scheduling_policy: How tasks are distributed across compute
     nodes in the pool. Default is spread.
    :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
    :param user_accounts: User accounts to be created on each node in the
     pool.
    :type user_accounts: list[~azure.batch.models.UserAccount]
    :param metadata: Name-value pairs associated with the pool as metadata;
     not interpreted by the Batch service.
    :type metadata: list[~azure.batch.models.MetadataItem]
    """

    # Only 'id' and 'vm_size' are mandatory; everything else is optional.
    _validation = {
        'id': {'required': True},
        'vm_size': {'required': True},
    }

    # Wire-name / msrest-type mapping for serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
    }

    def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, **kwargs) -> None:
        super(PoolAddParameter, self).__init__(**kwargs)
        # Straight keyword-to-attribute copy; no transformation is applied.
        self.id = id
        self.display_name = display_name
        self.vm_size = vm_size
        self.cloud_service_configuration = cloud_service_configuration
        self.virtual_machine_configuration = virtual_machine_configuration
        self.resize_timeout = resize_timeout
        self.target_dedicated_nodes = target_dedicated_nodes
        self.target_low_priority_nodes = target_low_priority_nodes
        self.enable_auto_scale = enable_auto_scale
        self.auto_scale_formula = auto_scale_formula
        self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
        self.enable_inter_node_communication = enable_inter_node_communication
        self.network_configuration = network_configuration
        self.start_task = start_task
        self.certificate_references = certificate_references
        self.application_package_references = application_package_references
        self.application_licenses = application_licenses
        self.max_tasks_per_node = max_tasks_per_node
        self.task_scheduling_policy = task_scheduling_policy
        self.user_accounts = user_accounts
        self.metadata = metadata
class PoolDeleteOptions(Model):
    """Additional parameters for the pool delete operation.

    :param timeout: Maximum server-side processing time for the request, in
     seconds. Default value: 30 .
    :type timeout: int
    :param client_request_id: Caller-generated request identity, a GUID with
     no decoration such as curly braces, e.g.
     9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
    :type client_request_id: str
    :param return_client_request_id: Whether the server should return the
     client-request-id in the response. Default value: False .
    :type return_client_request_id: bool
    :param ocp_date: The time the request was issued. Client libraries
     typically set this to the current system clock time; set it explicitly
     if calling the REST API directly.
    :type ocp_date: datetime
    :param if_match: An ETag value known to the client; the operation
     proceeds only if the resource's current ETag exactly matches it.
    :type if_match: str
    :param if_none_match: An ETag value known to the client; the operation
     proceeds only if the resource's current ETag does not match it.
    :type if_none_match: str
    :param if_modified_since: A timestamp; the operation proceeds only if
     the resource has been modified since this time.
    :type if_modified_since: datetime
    :param if_unmodified_since: A timestamp; the operation proceeds only if
     the resource has not been modified since this time.
    :type if_unmodified_since: datetime
    """

    # Empty 'key' values: these are operation-level options rather than
    # request-body fields — NOTE(review): presumably serialized as
    # query/header parameters by the generated operation method, not by
    # this model; confirm against the generated operations code.
    _attribute_map = {
        'timeout': {'key': '', 'type': 'int'},
        'client_request_id': {'key': '', 'type': 'str'},
        'return_client_request_id': {'key': '', 'type': 'bool'},
        'ocp_date': {'key': '', 'type': 'rfc-1123'},
        'if_match': {'key': '', 'type': 'str'},
        'if_none_match': {'key': '', 'type': 'str'},
        'if_modified_since': {'key': '', 'type': 'rfc-1123'},
        'if_unmodified_since': {'key': '', 'type': 'rfc-1123'},
    }

    def __init__(self, **kwargs):
        super(PoolDeleteOptions, self).__init__(**kwargs)
        # Defaults mirror the documented service defaults above; the ETag
        # and timestamp preconditions default to "not specified".
        self.timeout = kwargs.get('timeout', 30)
        self.client_request_id = kwargs.get('client_request_id')
        self.return_client_request_id = kwargs.get('return_client_request_id', False)
        self.ocp_date = kwargs.get('ocp_date')
        self.if_match = kwargs.get('if_match')
        self.if_none_match = kwargs.get('if_none_match')
        self.if_modified_since = kwargs.get('if_modified_since')
        self.if_unmodified_since = kwargs.get('if_unmodified_since')
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options.py new file mode 100644 index 00000000..96b0bc7c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options_py3.py new file mode 100644 index 00000000..4a069bd0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_disable_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options.py new file mode 100644 index 00000000..dd77582f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options_py3.py new file mode 100644 index 00000000..507bd702 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter.py new file mode 100644 index 00000000..793c71c5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a pool. + + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. The formula is checked for validity before it is + applied to the pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale compute nodes in an + Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. 
+ :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter_py3.py new file mode 100644 index 00000000..1c0019e4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_enable_auto_scale_parameter_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a pool. + + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. The formula is checked for validity before it is + applied to the pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale compute nodes in an + Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
+ :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. + :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None: + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration.py new file mode 100644 index 00000000..97859ff2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, **kwargs): + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration_py3.py new file mode 100644 index 00000000..95788b53 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_endpoint_configuration_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, *, inbound_nat_pools, **kwargs) -> None: + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = inbound_nat_pools diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options.py new file mode 100644 index 00000000..5fbb7ad3 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options_py3.py new file mode 100644 index 00000000..a2f09b9d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter.py new file mode 100644 index 00000000..c74cfac2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to the + pool, 'Enable automatic scaling on a pool'. 
For more information about + specifying this formula, see Automatically scale compute nodes in an Azure + Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter_py3.py new file mode 100644 index 00000000..5102b28e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_evaluate_auto_scale_parameter_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to the + pool, 'Enable automatic scaling on a pool'. 
For more information about + specifying this formula, see Automatically scale compute nodes in an Azure + Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, *, auto_scale_formula: str, **kwargs) -> None: + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options.py new file mode 100644 index 00000000..feffd1c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. 
Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options_py3.py new file mode 100644 index 00000000..de152edb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..dbbbcf45 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..0fc18020 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options.py new file mode 100644 index 00000000..a629c21e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options_py3.py new file mode 100644 index 00000000..c0b04bd5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_information.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_information.py new file mode 100644 index 00000000..132e32bb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a job should be assigned to a pool. + + :param pool_id: The ID of an existing pool. All the tasks of the job will + run on the specified pool. You must ensure that the pool referenced by + this property exists. If the pool does not exist at the time the Batch + service tries to schedule a job, no tasks for the job will run until you + create a pool with that id. Note that the Batch service will not reject + the job request; it will simply not run tasks until the pool exists. You + must specify either the pool ID or the auto pool specification, but not + both. + :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto pool when the job is + submitted. If auto pool creation fails, the Batch service moves the job to + a completed state, and the pool creation error is set in the job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto pool. + Any user actions that affect the lifetime of the auto pool while the job + is active will result in unexpected behavior. You must specify either the + pool ID or the auto pool specification, but not both. 
+ :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, **kwargs): + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.auto_pool_specification = kwargs.get('auto_pool_specification', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_information_py3.py new file mode 100644 index 00000000..6fc8d2ce --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a job should be assigned to a pool. + + :param pool_id: The ID of an existing pool. All the tasks of the job will + run on the specified pool. You must ensure that the pool referenced by + this property exists. If the pool does not exist at the time the Batch + service tries to schedule a job, no tasks for the job will run until you + create a pool with that id. Note that the Batch service will not reject + the job request; it will simply not run tasks until the pool exists. You + must specify either the pool ID or the auto pool specification, but not + both. 
+ :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto pool when the job is + submitted. If auto pool creation fails, the Batch service moves the job to + a completed state, and the pool creation error is set in the job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto pool. + Any user actions that affect the lifetime of the auto pool while the job + is active will result in unexpected behavior. You must specify either the + pool ID or the auto pool specification, but not both. + :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, *, pool_id: str=None, auto_pool_specification=None, **kwargs) -> None: + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.auto_pool_specification = auto_pool_specification diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options.py new file mode 100644 index 00000000..1b37afe6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options_py3.py new file mode 100644 index 00000000..5cc33a41 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options.py new file mode 100644 index 00000000..5b52f71a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. + + :param start_time: The earliest time from which to include metrics. 
This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options_py3.py new file mode 100644 index 00000000..2141cfa5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_list_usage_metrics_options_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. 
+ + :param start_time: The earliest time from which to include metrics. This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts.py new file mode 100644 index 00000000..0430b0af --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of nodes in each state for a pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. 
The ID of the pool. + :type pool_id: str + :param dedicated: The number of dedicated nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority nodes in each state. + :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, **kwargs): + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.dedicated = kwargs.get('dedicated', None) + self.low_priority = kwargs.get('low_priority', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_paged.py new file mode 100644 index 00000000..67159e5d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolNodeCountsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolNodeCounts ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolNodeCounts]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolNodeCountsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_py3.py new file mode 100644 index 00000000..63ef0824 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_node_counts_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of nodes in each state for a pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool. + :type pool_id: str + :param dedicated: The number of dedicated nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority nodes in each state. 
+ :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None: + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = pool_id + self.dedicated = dedicated + self.low_priority = low_priority diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options.py new file mode 100644 index 00000000..82b54aef --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options_py3.py new file mode 100644 index 00000000..ff9f10f0 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter.py new file mode 100644 index 00000000..b1c20f42 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a pool. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. If this element is present, it replaces any + existing certificate references configured on the pool. If omitted, any + existing certificate references are left unchanged. For Windows compute + nodes, the Batch service installs the certificates to the specified + certificate store and location. For Linux compute nodes, the certificates + are stored in a directory inside the task working directory and an + environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to + query for this location. For certificates with visibility of 'remoteUser', + a 'certs' directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. The list replaces any + existing application package references on the pool. Changes to + application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. There is a maximum of 10 application + package references on any given pool. If omitted, any existing application + package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the pool. If you specify an empty collection, any metadata + is removed from the pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter_py3.py new file mode 100644 index 00000000..e1880729 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_patch_parameter_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a pool. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. If this element is present, it replaces any + existing certificate references configured on the pool. If omitted, any + existing certificate references are left unchanged. For Windows compute + nodes, the Batch service installs the certificates to the specified + certificate store and location. For Linux compute nodes, the certificates + are stored in a directory inside the task working directory and an + environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the task to + query for this location. For certificates with visibility of 'remoteUser', + a 'certs' directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. The list replaces any + existing application package references on the pool. Changes to + application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. There is a maximum of 10 application + package references on any given pool. If omitted, any existing application + package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the pool. If you specify an empty collection, any metadata + is removed from the pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, **kwargs) -> None: + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options.py new file mode 100644 index 00000000..14be8ddd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options_py3.py new file mode 100644 index 00000000..1fe5eb97 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_remove_nodes_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options.py new file mode 100644 index 00000000..e83a7ccc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options_py3.py new file mode 100644 index 00000000..ef457e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter.py new file mode 100644 index 00000000..e37a88c8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a pool. 
+ + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of compute nodes to the + pool or removal of compute nodes from the pool. The default value is 15 + minutes. The minimum value is 5 minutes. If you specify a value less than + 5 minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) if the pool size is decreasing. The default value is + requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter_py3.py new file mode 100644 index 00000000..6aff469f --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/models/pool_resize_parameter_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a pool. + + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of compute nodes to the + pool or removal of compute nodes from the pool. The default value is 15 + minutes. The minimum value is 5 minutes. If you specify a value less than + 5 minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) if the pool size is decreasing. The default value is + requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_specification.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_specification.py new file mode 100644 index 00000000..57e00c25 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_specification.py @@ -0,0 +1,190 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the pool. + All virtual machines in a pool are the same size. For information about + available sizes of virtual machines in pools, see Choose a VM size for + compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property must be specified if the pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property must be specified if the pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. 
+ :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. 
The + formula is checked for validity before the pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. Enabling inter-node communication limits the + maximum size of the pool due to deployment restrictions on the nodes of + the pool. This may result in the pool not reaching its desired size. The + default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. 
For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. There is a maximum of 10 application package + references on any given pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. The permitted licenses available on the pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the pool. + :type application_licenses: list[str] + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = 
kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_specification_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_specification_py3.py new file mode 100644 index 00000000..ef7e900b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_specification_py3.py @@ -0,0 +1,190 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new pool. + + All required parameters must be populated in order to send to Azure. + + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the pool. + All virtual machines in a pool are the same size. For information about + available sizes of virtual machines in pools, see Choose a VM size for + compute nodes in an Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the pool. This property must be specified if the pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the pool. This property must be specified if the pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of compute nodes to the + pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The pool automatically resizes according to the + formula. 
The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of compute + nodes in the pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the pool permits direct + communication between nodes. Enabling inter-node communication limits the + maximum size of the pool due to deployment restrictions on the nodes of + the pool. This may result in the pool not reaching its desired size. The + default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of certificates to be installed on + each compute node in the pool. For Windows compute nodes, the Batch + service installs the certificates to the specified certificate store and + location. 
For Linux compute nodes, the certificates are stored in a + directory inside the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of application packages to + be installed on each compute node in the pool. Changes to application + package references affect all new compute nodes joining the pool, but do + not affect compute nodes that are already in the pool until they are + rebooted or reimaged. There is a maximum of 10 application package + references on any given pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. The permitted licenses available on the pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the pool. + :type application_licenses: list[str] + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, 
target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, **kwargs) -> None: + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.user_accounts = user_accounts + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics.py new file mode 100644 index 00000000..297e40d5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + compute nodes in the pool. 
+ :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, **kwargs): + super(PoolStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.usage_stats = kwargs.get('usage_stats', None) + self.resource_stats = kwargs.get('resource_stats', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics_py3.py new file mode 100644 index 00000000..f79e93b8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_statistics_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. 
+ :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + compute nodes in the pool. + :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None: + super(PoolStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.usage_stats = usage_stats + self.resource_stats = resource_stats diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options.py new file mode 100644 index 00000000..ab8fec73 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options_py3.py new file mode 100644 index 00000000..d5cc404e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_stop_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options.py new file mode 100644 index 00000000..ca7f97cb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options_py3.py new file mode 100644 index 00000000..edf5065c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter.py new file mode 100644 index 00000000..04812d04 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is removed from the pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of certificates to be + installed on each compute node in the pool. This list replaces any + existing certificate references configured on the pool. If you specify an + empty collection, any existing certificate references are removed from the + pool. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of application + packages to be installed on each compute node in the pool. The list + replaces any existing application package references on the pool. Changes + to application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. There is a maximum of 10 application + package references on any given pool. If omitted, or if you specify an + empty collection, any existing application packages references are removed + from the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + pool as metadata. This list replaces any existing metadata configured on + the pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the pool. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter_py3.py new file mode 100644 index 00000000..0f75bcb5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_update_properties_parameter_py3.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param start_task: A task to run on each compute node as it joins the + pool. The task runs when the node is added to the pool or when the node is + restarted. If this element is present, it overwrites any existing start + task. If omitted, any existing start task is removed from the pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of certificates to be + installed on each compute node in the pool. This list replaces any + existing certificate references configured on the pool. If you specify an + empty collection, any existing certificate references are removed from the + pool. For Windows compute nodes, the Batch service installs the + certificates to the specified certificate store and location. For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of application + packages to be installed on each compute node in the pool. The list + replaces any existing application package references on the pool. Changes + to application package references affect all new compute nodes joining the + pool, but do not affect compute nodes that are already in the pool until + they are rebooted or reimaged. There is a maximum of 10 application + package references on any given pool. If omitted, or if you specify an + empty collection, any existing application packages references are removed + from the pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. 
A list of name-value pairs associated with the + pool as metadata. This list replaces any existing metadata configured on + the pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the pool. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, **kwargs) -> None: + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics.py new file mode 100644 index 00000000..88153c67 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the pool. All + VMs in a pool are the same size. For information about available sizes of + virtual machines in pools, see Choose a VM size for compute nodes in an + Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the pool + during this aggregation interval. 
+ :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_core_hours = kwargs.get('total_core_hours', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_paged.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_paged.py new file mode 100644 index 00000000..891554f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolUsageMetricsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolUsageMetrics <azure.batch.models.PoolUsageMetrics>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolUsageMetrics]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolUsageMetricsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_py3.py new file mode 100644 index 00000000..963246fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/pool_usage_metrics_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the pool. All + VMs in a pool are the same size.
For information about available sizes of + virtual machines in pools, see Choose a VM size for compute nodes in an + Azure Batch pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the pool + during this aggregation interval. + :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None: + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = pool_id + self.start_time = start_time + self.end_time = end_time + self.vm_size = vm_size + self.total_core_hours = total_core_hours diff --git a/azext/generated/sdk/batch/v2018_12_01/models/recent_job.py b/azext/generated/sdk/batch/v2018_12_01/models/recent_job.py new file mode 100644 index 00000000..11d430a5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/recent_job.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent job to run under the job schedule. + + :param id: The ID of the job. + :type id: str + :param url: The URL of the job. + :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(RecentJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/recent_job_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/recent_job_py3.py new file mode 100644 index 00000000..94b133ae --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/recent_job_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent job to run under the job schedule. + + :param id: The ID of the job. + :type id: str + :param url: The URL of the job. 
+ :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None: + super(RecentJob, self).__init__(**kwargs) + self.id = id + self.url = url diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resize_error.py b/azext/generated/sdk/batch/v2018_12_01/models/resize_error.py new file mode 100644 index 00000000..8d166d81 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resize_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a pool. + + :param code: An identifier for the pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ResizeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resize_error_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/resize_error_py3.py new file mode 100644 index 00000000..9e400e60 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resize_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a pool. + + :param code: An identifier for the pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(ResizeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resource_file.py b/azext/generated/sdk/batch/v2018_12_01/models/resource_file.py new file mode 100644 index 00000000..6fa49b27 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resource_file.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a compute node. + + :param auto_storage_container_name: The storage container name in the auto + storage account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. 
This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the compute node to which to download + the file(s), relative to the task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. 
In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux compute nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows node. If this property is not specified for a + Linux node, then a default value of 0770 is applied to the file. + :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) + self.storage_container_url = kwargs.get('storage_container_url', None) + self.http_url = kwargs.get('http_url', None) + self.blob_prefix = kwargs.get('blob_prefix', None) + self.file_path = kwargs.get('file_path', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resource_file_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/resource_file_py3.py new file mode 100644 index 00000000..c7eb780e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resource_file_py3.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a compute node. + + :param auto_storage_container_name: The storage container name in the auto + storage account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. 
+ :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the compute node to which to download + the file(s), relative to the task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux compute nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows node. If this property is not specified for a + Linux node, then a default value of 0770 is applied to the file. 
+ :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None: + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = auto_storage_container_name + self.storage_container_url = storage_container_url + self.http_url = http_url + self.blob_prefix = blob_prefix + self.file_path = file_path + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics.py new file mode 100644 index 00000000..5e861d9e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. 
The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + nodes in the pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all nodes in the pool. + :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + nodes in the pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all nodes in the pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all nodes in the pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all nodes in the pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all nodes in the pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all nodes in the pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all nodes in the pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all nodes in the pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all nodes in the pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.avg_cpu_percentage = kwargs.get('avg_cpu_percentage', None) + self.avg_memory_gi_b = kwargs.get('avg_memory_gi_b', None) + self.peak_memory_gi_b = kwargs.get('peak_memory_gi_b', None) + self.avg_disk_gi_b = kwargs.get('avg_disk_gi_b', None) + self.peak_disk_gi_b = kwargs.get('peak_disk_gi_b', None) + self.disk_read_iops = 
kwargs.get('disk_read_iops', None) + self.disk_write_iops = kwargs.get('disk_write_iops', None) + self.disk_read_gi_b = kwargs.get('disk_read_gi_b', None) + self.disk_write_gi_b = kwargs.get('disk_write_gi_b', None) + self.network_read_gi_b = kwargs.get('network_read_gi_b', None) + self.network_write_gi_b = kwargs.get('network_write_gi_b', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics_py3.py new file mode 100644 index 00000000..bcf0830f --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/resource_statistics_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by compute nodes in a pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + nodes in the pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all nodes in the pool. 
+ :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + nodes in the pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all nodes in the pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all nodes in the pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all nodes in the pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all nodes in the pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all nodes in the pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all nodes in the pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all nodes in the pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all nodes in the pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None: + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.avg_cpu_percentage = avg_cpu_percentage + self.avg_memory_gi_b = 
avg_memory_gi_b + self.peak_memory_gi_b = peak_memory_gi_b + self.avg_disk_gi_b = avg_disk_gi_b + self.peak_disk_gi_b = peak_disk_gi_b + self.disk_read_iops = disk_read_iops + self.disk_write_iops = disk_write_iops + self.disk_read_gi_b = disk_read_gi_b + self.disk_write_gi_b = disk_write_gi_b + self.network_read_gi_b = network_read_gi_b + self.network_write_gi_b = network_write_gi_b diff --git a/azext/generated/sdk/batch/v2018_12_01/models/schedule.py b/azext/generated/sdk/batch/v2018_12_01/models/schedule.py new file mode 100644 index 00000000..e6339eb9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/schedule.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which jobs will be created. + + :param do_not_run_until: The earliest time at which any job may be created + under this job schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create jobs immediately. + :type do_not_run_until: datetime + :param do_not_run_after: A time after which no job will be created under + this job schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active job under this job + schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring job schedule, the job schedule will remain active until you + explicitly terminate it. 
+ :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a job should be created, within which a job must be + created. If a job is not created within the startWindow interval, then the + 'opportunity' is lost; no job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive jobs under the job schedule. A job schedule can have at + most one active job under it at any given time. Because a job schedule can + have at most one active job under it at any given time, if it is time to + create a new job under a job schedule, but the previous job is still + running, the Batch service will not create the new job until the previous + job finishes. If the previous job does not finish within the startWindow + period of the new recurrenceInterval, then no new job will be scheduled + for that interval. For recurring jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when jobs are created, add + tasks to the jobs and terminate the jobs ready for the next recurrence. + The default is that the schedule does not recur: one job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that job finishes. The minimum value is 1 minute. 
If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = kwargs.get('do_not_run_until', None) + self.do_not_run_after = kwargs.get('do_not_run_after', None) + self.start_window = kwargs.get('start_window', None) + self.recurrence_interval = kwargs.get('recurrence_interval', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/schedule_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/schedule_py3.py new file mode 100644 index 00000000..66ab18a4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/schedule_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which jobs will be created. + + :param do_not_run_until: The earliest time at which any job may be created + under this job schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create jobs immediately. 
+ :type do_not_run_until: datetime + :param do_not_run_after: A time after which no job will be created under + this job schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active job under this job + schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring job schedule, the job schedule will remain active until you + explicitly terminate it. + :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a job should be created, within which a job must be + created. If a job is not created within the startWindow interval, then the + 'opportunity' is lost; no job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive jobs under the job schedule. A job schedule can have at + most one active job under it at any given time. Because a job schedule can + have at most one active job under it at any given time, if it is time to + create a new job under a job schedule, but the previous job is still + running, the Batch service will not create the new job until the previous + job finishes. If the previous job does not finish within the startWindow + period of the new recurrenceInterval, then no new job will be scheduled + for that interval. 
For recurring jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when jobs are created, add + tasks to the jobs and terminate the jobs ready for the next recurrence. + The default is that the schedule does not recur: one job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that job finishes. The minimum value is 1 minute. If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None: + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = do_not_run_until + self.do_not_run_after = do_not_run_after + self.start_window = start_window + self.recurrence_interval = recurrence_interval diff --git a/azext/generated/sdk/batch/v2018_12_01/models/start_task.py b/azext/generated/sdk/batch/v2018_12_01/models/start_task.py new file mode 100644 index 00000000..b2d58bbc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/start_task.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A task which is run when a compute node joins a pool in the Azure Batch + service, or when the compute node is rebooted or reimaged. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. In some cases the start task may be re-run even though the + node was not rebooted. Special care should be taken to avoid start tasks + which create breakaway process or install/launch services from the start + task working directory, as this will block Batch from being able to re-run + the start task. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start task runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all task environment variables are + mapped into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start task runs. + If omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. 
The + Batch service will try the task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the task. If the maximum retry count is + -1, the Batch service retries the task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start task to complete successfully (that is, to exit with exit code 0) + before scheduling any tasks on the compute node. If true and the start + task fails on a compute node, the Batch service retries the start task up + to its maximum retry count (maxTaskRetryCount). If the task has still not + completed successfully after all retries, then the Batch service marks the + compute node unusable, and will not schedule tasks to it. This condition + can be detected via the node state and failure info details. If false, the + Batch service will not wait for the start task to complete. In this case, + other tasks can start executing on the compute node while the start task + is still running; and even if the start task fails, new tasks will + continue to be scheduled on the node. The default is false. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(StartTask, self).__init__(**kwargs) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.user_identity = kwargs.get('user_identity', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) + self.wait_for_success = kwargs.get('wait_for_success', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/start_task_information.py b/azext/generated/sdk/batch/v2018_12_01/models/start_task_information.py new file mode 100644 index 00000000..e8b68b08 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/start_task_information.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start task on the compute node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start task started + running. This value is reset every time the task is restarted or retried + (that is, this is the most recent time at which the start task started + running). + :type start_time: datetime + :param end_time: The time at which the start task stopped running. This is + the end time of the most recent run of the start task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start task + command line. This property is set only if the start task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. 
This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(StartTaskInformation, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/start_task_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/start_task_information_py3.py new file mode 100644 index 00000000..cb434ab2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/start_task_information_py3.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start task on the compute node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start task started + running. This value is reset every time the task is restarted or retried + (that is, this is the most recent time at which the start task started + running). + :type start_time: datetime + :param end_time: The time at which the start task stopped running. This is + the end time of the most recent run of the start task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start task + command line. This property is set only if the start task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, state, start_time, retry_count: int, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(StartTaskInformation, self).__init__(**kwargs) + self.state = state + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_12_01/models/start_task_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/start_task_py3.py new file mode 100644 index 00000000..c695032c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/start_task_py3.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A task which is run when a compute node joins a pool in the Azure Batch + service, or when the compute node is rebooted or reimaged. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. In some cases the start task may be re-run even though the + node was not rebooted. Special care should be taken to avoid start tasks + which create breakaway process or install/launch services from the start + task working directory, as this will block Batch from being able to re-run + the start task. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start task runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all task environment variables are + mapped into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start task runs. + If omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. 
The + Batch service will try the task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the task. If the maximum retry count is + -1, the Batch service retries the task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start task to complete successfully (that is, to exit with exit code 0) + before scheduling any tasks on the compute node. If true and the start + task fails on a compute node, the Batch service retries the start task up + to its maximum retry count (maxTaskRetryCount). If the task has still not + completed successfully after all retries, then the Batch service marks the + compute node unusable, and will not schedule tasks to it. This condition + can be detected via the node state and failure info details. If false, the + Batch service will not wait for the start task to complete. In this case, + other tasks can start executing on the compute node while the start task + is still running; and even if the start task fails, new tasks will + continue to be scheduled on the node. The default is false. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None: + super(StartTask, self).__init__(**kwargs) + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.user_identity = user_identity + self.max_task_retry_count = max_task_retry_count + self.wait_for_success = wait_for_success diff --git a/azext/generated/sdk/batch/v2018_12_01/models/subtask_information.py b/azext/generated/sdk/batch/v2018_12_01/models/subtask_information.py new file mode 100644 index 00000000..dbbff704 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/subtask_information.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the compute node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. 
Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. + :type previous_state_transition_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(SubtaskInformation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.node_info = kwargs.get('node_info', 
None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/subtask_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/subtask_information_py3.py new file mode 100644 index 00000000..1399c866 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/subtask_information_py3.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the compute node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. 
This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. 
+ :type previous_state_transition_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, id: int=None, node_info=None, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, result=None, **kwargs) -> None: + super(SubtaskInformation, self).__init__(**kwargs) + self.id = id + self.node_info = node_info + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options.py 
b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options.py new file mode 100644 index 00000000..f0622c9c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options_py3.py new file mode 100644 index 00000000..634f522c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter.py new file mode 100644 index 00000000..56b615c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch tasks to add. 
+ + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of tasks to add. The maximum count + of tasks is 100. The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter_py3.py new file mode 100644 index 00000000..bfeaf536 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_parameter_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch tasks to add. + + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of tasks to add. The maximum count + of tasks is 100. 
The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, *, value, **kwargs) -> None: + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result.py new file mode 100644 index 00000000..0dbc1420 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of tasks to a job. + + :param value: The results of the add task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result_py3.py new file mode 100644 index 00000000..06cde63a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_collection_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of tasks to a job. + + :param value: The results of the add task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_options.py new file mode 100644 index 00000000..667cc19d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_options_py3.py new file mode 100644 index 00000000..da9c6a8c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter.py new file mode 100644 index 00000000..a509aa63 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch task to add. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. 
Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the task within the + job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a job that differ only by case). + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the task. For + multi-instance tasks, the command line is executed as the primary task, + after the primary task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
+ :type command_line: str + :param container_settings: The settings for the container under which the + task runs. If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. 
+ :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The tasks that this task depends on. This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. If the job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. 
If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 
'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, **kwargs): + super(TaskAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.exit_conditions = kwargs.get('exit_conditions', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.affinity_info = kwargs.get('affinity_info', None) + self.constraints = kwargs.get('constraints', None) + self.user_identity = kwargs.get('user_identity', None) + self.multi_instance_settings = kwargs.get('multi_instance_settings', None) + self.depends_on = kwargs.get('depends_on', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter_py3.py new file mode 100644 index 00000000..0e72123d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_parameter_py3.py @@ -0,0 +1,167 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch task to add. + + Batch will retry tasks when a recovery operation is triggered on a compute + node. Examples of recovery operations include (but are not limited to) when + an unhealthy compute node is rebooted or a compute node disappeared due to + host failure. Retries due to recovery operations are independent of and are + not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is + 0, an internal retry due to a recovery operation may occur. Because of + this, all tasks should be idempotent. This means tasks need to tolerate + being interrupted and restarted without causing any corruption or duplicate + data. The best practice for long running tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the task within the + job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a job that differ only by case). + :type id: str + :param display_name: A display name for the task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the task. For + multi-instance tasks, the command line is executed as the primary task, + after the primary task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. 
If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + task runs. If the pool that will run this task has containerConfiguration + set, this must be set as well. If the pool that will run this task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all task environment variables are mapped into the container, + and the task command line is executed in the container. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. For + multi-instance tasks, the resource files will only be downloaded to the + compute node on which the primary task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
+ :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the compute node after running the command line. For multi-instance + tasks, the files will only be uploaded from the compute node on which the + primary task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a compute node on which to start the new task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the task runs. If + omitted, the task runs as a non-administrative user unique to the task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the task is + a multi-instance task, and contains information about how to run the + multi-instance task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The tasks that this task depends on. This task will not + be scheduled until all tasks that it depends on have completed + successfully. If any of those tasks fail and exhaust their retry counts, + this task will never be scheduled. If the job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. 
+ :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of application packages that + the Batch service will deploy to the compute node before running the + command line. Application packages are downloaded and deployed to a shared + directory, not the task working directory. Therefore, if a referenced + package is already on the compute node, and is up to date, then it is not + re-downloaded; the existing copy on the compute node is used. If a + referenced application package cannot be installed, for example because + the package has been deleted or because download failed, the task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the task can use to perform Batch service operations. If this + property is set, the Batch service provides the task with an + authentication token which can be used to authenticate Batch service + operations without requiring an account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the task can carry out using the token depend on the settings. For + example, a task can request job permissions in order to add other tasks to + the job, or check the status of the job or of other tasks under the job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(TaskAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.exit_conditions = exit_conditions + self.resource_files = 
resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.multi_instance_settings = multi_instance_settings + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_result.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_result.py new file mode 100644 index 00000000..7528e30d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_result.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single task added as part of an add task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the task, if the task was successfully added. + You can use this to detect whether the task has changed between requests. 
+ In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param location: The URL of the task, if the task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the task. + :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, **kwargs): + super(TaskAddResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.task_id = kwargs.get('task_id', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.location = kwargs.get('location', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_add_result_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_add_result_py3.py new file mode 100644 index 00000000..7add806b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_add_result_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single task added as part of an add task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the task, if the task was successfully added. + You can use this to detect whether the task has changed between requests. + In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the task. + :type last_modified: datetime + :param location: The URL of the task, if the task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the task. 
+ :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None: + super(TaskAddResult, self).__init__(**kwargs) + self.status = status + self.task_id = task_id + self.e_tag = e_tag + self.last_modified = last_modified + self.location = location + self.error = error diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_constraints.py b/azext/generated/sdk/batch/v2018_12_01/models/task_constraints.py new file mode 100644 index 00000000..98ad92cc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_constraints.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a task. + + :param max_wall_clock_time: The maximum elapsed time that the task may + run, measured from the time the task starts. If the task does not complete + within the time limit, the Batch service terminates it. 
If this is not + specified, there is no time limit on how long the task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory on + the compute node where it ran, from the time it completes execution. After + this time, the Batch service may delete the task directory and all its + contents. The default is 7 days, i.e. the task directory will be retained + for 7 days unless the compute node is removed or the job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + task executable due to a nonzero exit code. The Batch service will try the + task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the task after the first attempt. If the maximum retry + count is -1, the Batch service retries the task without limit. 
+ :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_constraints_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_constraints_py3.py new file mode 100644 index 00000000..db8da126 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_constraints_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a task. + + :param max_wall_clock_time: The maximum elapsed time that the task may + run, measured from the time the task starts. If the task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the task directory on + the compute node where it ran, from the time it completes execution. 
After + this time, the Batch service may delete the task directory and all its + contents. The default is 7 days, i.e. the task directory will be retained + for 7 days unless the compute node is removed or the job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + task executable due to a nonzero exit code. The Batch service will try the + task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the task after the first attempt. If the maximum retry + count is -1, the Batch service retries the task without limit. + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information.py new file mode 100644 index 00000000..6ade9177 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = kwargs.get('container_id', None) + self.state = kwargs.get('state', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information_py3.py new file mode 100644 index 00000000..44f9e7e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_container_execution_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, *, container_id: str=None, state: str=None, error: str=None, **kwargs) -> None: + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = container_id + self.state = state + self.error = error diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings.py b/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings.py new file mode 100644 index 00000000..ac1a56f8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The image to use to create the container in + which the task will run. This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container image. + This setting can be omitted if it was already provided at pool creation. 
+ :type registry: ~azure.batch.models.ContainerRegistry + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + } + + def __init__(self, **kwargs): + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = kwargs.get('container_run_options', None) + self.image_name = kwargs.get('image_name', None) + self.registry = kwargs.get('registry', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings_py3.py new file mode 100644 index 00000000..dabd7b99 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_container_settings_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The image to use to create the container in + which the task will run. 
This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container image. + This setting can be omitted if it was already provided at pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + } + + def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, **kwargs) -> None: + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = container_run_options + self.image_name = image_name + self.registry = registry diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_counts.py b/azext/generated/sdk/batch/v2018_12_01/models/task_counts.py new file mode 100644 index 00000000..057f2d7a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_counts.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The task counts for a job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of tasks in the active state.
+ :type active: int + :param running: Required. The number of tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of tasks which succeeded. A task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of tasks which failed. A task fails if + its result (found in the executionInfo property) is 'failure'. + :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskCounts, self).__init__(**kwargs) + self.active = kwargs.get('active', None) + self.running = kwargs.get('running', None) + self.completed = kwargs.get('completed', None) + self.succeeded = kwargs.get('succeeded', None) + self.failed = kwargs.get('failed', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_counts_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_counts_py3.py new file mode 100644 index 00000000..623c7dd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_counts_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The task counts for a job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of tasks in the active state. + :type active: int + :param running: Required. The number of tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of tasks which succeeded. A task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of tasks which failed. A task fails if + its result (found in the executionInfo property) is 'failure'. + :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: + super(TaskCounts, self).__init__(**kwargs) + self.active = active + self.running = running + self.completed = completed + self.succeeded = succeeded + self.failed = failed diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options.py new file mode 100644 index 00000000..2daf7608 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. 
+ :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options_py3.py new file mode 100644 index 00000000..4b836c65 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. 
The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies.py b/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies.py new file mode 100644 index 00000000..f5bfb8c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a task. Any task that is explicitly specified + or within a dependency range must complete before the dependent task will + be scheduled. + + :param task_ids: The list of task IDs that this task depends on. All tasks + in this list must complete successfully before the dependent task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of task ID ranges that this task depends + on. All tasks in all ranges must complete successfully before the + dependent task can be scheduled.
+ :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, **kwargs): + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = kwargs.get('task_ids', None) + self.task_id_ranges = kwargs.get('task_id_ranges', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies_py3.py new file mode 100644 index 00000000..133f3268 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_dependencies_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a task. Any task that is explicitly specified + or within a dependency range must complete before the dependent task will + be scheduled. + + :param task_ids: The list of task IDs that this task depends on. All tasks + in this list must complete successfully before the dependent task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using task ID ranges + instead.
+ :type task_ids: list[str] + :param task_id_ranges: The list of task ID ranges that this task depends + on. All tasks in all ranges must complete successfully before the + dependent task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None: + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = task_ids + self.task_id_ranges = task_id_ranges diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information.py b/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information.py new file mode 100644 index 00000000..97e313dd --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the task started running. 'Running' + corresponds to the running state, so if the task specifies resource files + or application packages, then the start time reflects the time at which + the task started downloading or deploying these. 
If the task has been + restarted or retried, this is the most recent time at which the task + started running. This property is present only for tasks that are in the + running or completed state. + :type start_time: datetime + :param end_time: The time at which the task completed. This property is + set only if the task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the task + command line. This property is set only if the task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. 
retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the task has been + requeued by the Batch service as the result of a user request. When the + user removes nodes from a pool (by resizing/shrinking the pool) or when + the job is being disabled, the user can specify that running tasks on the + nodes be requeued for execution. This count tracks how many times the task + has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.requeue_count = kwargs.get('requeue_count', None) + self.last_requeue_time = kwargs.get('last_requeue_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information_py3.py new file mode 100644 index 00000000..330bb6f4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_execution_information_py3.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the task started running. 'Running' + corresponds to the running state, so if the task specifies resource files + or application packages, then the start time reflects the time at which + the task started downloading or deploying these. If the task has been + restarted or retried, this is the most recent time at which the task + started running. This property is present only for tasks that are in the + running or completed state. + :type start_time: datetime + :param end_time: The time at which the task completed. This property is + set only if the task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the task + command line. This property is set only if the task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + task is executing. This property is set only if the task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the task failure, if any. This + property is set only if the task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the task could not be run) and + file upload errors are not retried. The Batch service will retry the task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the task + started running. This element is present only if the task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the task has been restarted for reasons + other than retry; for example, if the compute node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the task has been + requeued by the Batch service as the result of a user request. When the + user removes nodes from a pool (by resizing/shrinking the pool) or when + the job is being disabled, the user can specify that running tasks on the + nodes be requeued for execution. This count tracks how many times the task + has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, retry_count: int, requeue_count: int, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, last_requeue_time=None, result=None, **kwargs) -> None: + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.requeue_count = requeue_count + self.last_requeue_time = last_requeue_time + self.result = result diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information.py b/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information.py new file mode 100644 index 00000000..fc6a45fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the task error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information_py3.py new file mode 100644 index 00000000..b5eece45 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_failure_information_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the task error. Codes are invariant and are + intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_get_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_get_options.py new file mode 100644 index 00000000..08c1fd8a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_get_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_get_options_py3.py new file mode 100644 index 00000000..68699028 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_id_range.py b/azext/generated/sdk/batch/v2018_12_01/models/task_id_range.py new file mode 100644 index 00000000..db30d858 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_id_range.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of task IDs that a task can depend on. All tasks with IDs in the + range must complete successfully before the dependent task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first task ID in the range. + :type start: int + :param end: Required. The last task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskIdRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_id_range_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_id_range_py3.py new file mode 100644 index 00000000..446ed8ee --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_id_range_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of task IDs that a task can depend on. All tasks with IDs in the + range must complete successfully before the dependent task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first task ID in the range. + :type start: int + :param end: Required. The last task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(TaskIdRange, self).__init__(**kwargs) + self.start = start + self.end = end diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_information.py b/azext/generated/sdk/batch/v2018_12_01/models/task_information.py new file mode 100644 index 00000000..6e8ec0d1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_information.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a task running on a compute node. 
+ + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the task. + :type task_url: str + :param job_id: The ID of the job to which the task belongs. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param subtask_id: The ID of the subtask if the task is a multi-instance + task. + :type subtask_id: int + :param task_state: Required. The current state of the task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(TaskInformation, self).__init__(**kwargs) + self.task_url = kwargs.get('task_url', None) + self.job_id = kwargs.get('job_id', None) + self.task_id = kwargs.get('task_id', None) + self.subtask_id = kwargs.get('subtask_id', None) + self.task_state = kwargs.get('task_state', None) + self.execution_info = kwargs.get('execution_info', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_information_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_information_py3.py new file mode 100644 index 00000000..9406cba4 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_information_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a task running on a compute node. + + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the task. + :type task_url: str + :param job_id: The ID of the job to which the task belongs. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param subtask_id: The ID of the subtask if the task is a multi-instance + task. + :type subtask_id: int + :param task_state: Required. The current state of the task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the task. 
+ :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, *, task_state, task_url: str=None, job_id: str=None, task_id: str=None, subtask_id: int=None, execution_info=None, **kwargs) -> None: + super(TaskInformation, self).__init__(**kwargs) + self.task_url = task_url + self.job_id = job_id + self.task_id = task_id + self.subtask_id = subtask_id + self.task_state = task_state + self.execution_info = execution_info diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_list_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_list_options.py new file mode 100644 index 00000000..08c9cb00 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_list_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_list_options_py3.py new file mode 100644 index 00000000..bb02726e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options.py new file mode 100644 index 00000000..8157cee2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. 
+ :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options_py3.py new file mode 100644 index 00000000..b8810800 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_list_subtasks_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options.py new file mode 100644 index 00000000..fe074611 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options_py3.py new file mode 100644 index 00000000..bd39d6c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_reactivate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy.py b/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy.py new file mode 100644 index 00000000..2f121acb --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how tasks should be distributed across compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How tasks are distributed across compute + nodes in a pool. Possible values include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, **kwargs): + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = kwargs.get('node_fill_type', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy_py3.py new file mode 100644 index 00000000..f3ff79a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_scheduling_policy_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how tasks should be distributed across compute nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How tasks are distributed across compute + nodes in a pool. 
Possible values include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, *, node_fill_type, **kwargs) -> None: + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = node_fill_type diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/task_statistics.py new file mode 100644 index 00000000..b5f877fc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_statistics.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by the task. 
+ :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the task. + The wall clock time is the elapsed time from when the task started running + on a compute node to when it finished (or to the last time the statistics + were updated, if the task had not finished by then). If the task was + retried, this includes the wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by the task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the task. The wait time + for a task is defined as the elapsed time between the creation of the task + and the start of task execution. (If the task is retried due to failures, + the wait time is the time to the most recent task execution.). 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(TaskStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_statistics_py3.py new file mode 100644 index 
00000000..42de1dba --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_statistics_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all compute nodes) consumed by the task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the task. + The wall clock time is the elapsed time from when the task started running + on a compute node to when it finished (or to the last time the statistics + were updated, if the task had not finished by then). If the task was + retried, this includes the wall clock time of all the task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. 
The total number of disk read operations made + by the task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the task. The wait time + for a task is defined as the elapsed time between the creation of the task + and the start of task execution. (If the task is retried due to failures, + the wait time is the time to the most recent task execution.). + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, 
write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None: + super(TaskStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options.py new file mode 100644 index 00000000..1908a9da --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options_py3.py new file mode 100644 index 00000000..d967db3a --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_update_options.py b/azext/generated/sdk/batch/v2018_12_01/models/task_update_options.py new file mode 100644 index 00000000..32e1ad82 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_update_options_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_update_options_py3.py new file mode 100644 index 00000000..2a20ddf5 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter.py b/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter.py new file mode 100644 index 00000000..84246a43 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a task. 
+ + :param constraints: Constraints that apply to this task. If omitted, the + task is given the default constraints. For multi-instance tasks, updating + the retention time applies only to the primary task and not subtasks. + :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = kwargs.get('constraints', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter_py3.py new file mode 100644 index 00000000..71594e62 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/task_update_parameter_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a task. + + :param constraints: Constraints that apply to this task. If omitted, the + task is given the default constraints. For multi-instance tasks, updating + the retention time applies only to the primary task and not subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, *, constraints=None, **kwargs) -> None: + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = constraints diff --git a/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration.py new file mode 100644 index 00000000..1f96d326 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. 
This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. + :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = kwargs.get('container_url', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration_py3.py new file mode 100644 index 00000000..875beb60 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_configuration_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a compute node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. 
+ :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, container_url: str, start_time, end_time=None, **kwargs) -> None: + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = container_url + self.start_time = start_time + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result.py b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result.py new file mode 100644 index 00000000..a2d5a0fe --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific compute + node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. 
+ :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = kwargs.get('virtual_directory_name', None) + self.number_of_files_uploaded = kwargs.get('number_of_files_uploaded', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result_py3.py new file mode 100644 index 00000000..f9547bc2 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/upload_batch_service_logs_result_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific compute + node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. 
The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. + :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None: + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = virtual_directory_name + self.number_of_files_uploaded = number_of_files_uploaded diff --git a/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics.py b/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics.py new file mode 100644 index 00000000..08d709ad --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to pool usage information. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. 
+ :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated compute node cores being part of the pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.dedicated_core_time = kwargs.get('dedicated_core_time', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics_py3.py new file mode 100644 index 00000000..9fafd25d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/usage_statistics_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to pool usage information. 
+ + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated compute node cores being part of the pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None: + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.dedicated_core_time = dedicated_core_time diff --git a/azext/generated/sdk/batch/v2018_12_01/models/user_account.py b/azext/generated/sdk/batch/v2018_12_01/models/user_account.py new file mode 100644 index 00000000..419ff614 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/user_account.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute tasks on an Azure Batch + node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user account. + :type name: str + :param password: Required. The password for the user account. + :type password: str + :param elevation_level: The elevation level of the user account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user account. This property is ignored if specified on a Windows pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user account. This property can only be specified if the user is + on a Windows pool. If not specified and on a Windows pool, the user is + created with the default options. 
+ :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, **kwargs): + super(UserAccount, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.password = kwargs.get('password', None) + self.elevation_level = kwargs.get('elevation_level', None) + self.linux_user_configuration = kwargs.get('linux_user_configuration', None) + self.windows_user_configuration = kwargs.get('windows_user_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/user_account_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/user_account_py3.py new file mode 100644 index 00000000..875bdb94 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/user_account_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute tasks on an Azure Batch + node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the user account. + :type name: str + :param password: Required. The password for the user account. + :type password: str + :param elevation_level: The elevation level of the user account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user account. This property is ignored if specified on a Windows pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user account. This property can only be specified if the user is + on a Windows pool. If not specified and on a Windows pool, the user is + created with the default options. + :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: + super(UserAccount, self).__init__(**kwargs) + self.name = name + self.password = password + self.elevation_level = elevation_level + self.linux_user_configuration = linux_user_configuration + self.windows_user_configuration = windows_user_configuration diff --git a/azext/generated/sdk/batch/v2018_12_01/models/user_identity.py 
b/azext/generated/sdk/batch/v2018_12_01/models/user_identity.py new file mode 100644 index 00000000..1301c24e --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/user_identity.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, **kwargs): + super(UserIdentity, self).__init__(**kwargs) + self.user_name = kwargs.get('user_name', None) + self.auto_user = kwargs.get('auto_user', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/user_identity_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/user_identity_py3.py new file mode 100644 index 00000000..37992852 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/user_identity_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: + super(UserIdentity, self).__init__(**kwargs) + self.user_name = user_name + self.auto_user = auto_user diff --git a/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration.py new file mode 100644 index 00000000..592d8009 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for compute nodes in a pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace image or the custom Virtual Machine image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be + provisioned on compute nodes in the pool. The Batch node agent is a + program that runs on each node in the pool, and provides the + command-and-control interface between the node and the Batch service. 
+ There are different implementations of the node agent, known as SKUs, for + different operating systems. You must specify a node agent SKU which + matches the selected image reference. To get the list of supported node + agent SKUs along with their list of verified image references, see the + 'List supported node agent SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + compute nodes in the pool. This property must be specified if the compute + nodes in the pool need to have empty data disks attached to them. This + cannot be updated. Each node gets its own disk (the disk is not a file + share). Existing disks cannot be attached, each attached disk is empty. + When the node is removed from the pool, the disk and all data associated + with it is also deleted. The disk is not formatted after being attached, + it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the nodes which will be deployed. If omitted, no + on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. 
+ Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the pool. + If specified, setup is performed on each node in the pool to allow tasks + to run in containers. All regular tasks and job manager tasks run on this + pool must specify the containerSettings property, and all other tasks may + specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, **kwargs): + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = kwargs.get('image_reference', None) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.windows_configuration = kwargs.get('windows_configuration', None) + self.data_disks = kwargs.get('data_disks', None) + self.license_type = kwargs.get('license_type', None) + self.container_configuration = kwargs.get('container_configuration', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration_py3.py new file mode 100644 index 00000000..b9ca8b26 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/virtual_machine_configuration_py3.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for compute nodes in a pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace image or the custom Virtual Machine image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be + provisioned on compute nodes in the pool. The Batch node agent is a + program that runs on each node in the pool, and provides the + command-and-control interface between the node and the Batch service. + There are different implementations of the node agent, known as SKUs, for + different operating systems. You must specify a node agent SKU which + matches the selected image reference. To get the list of supported node + agent SKUs along with their list of verified image references, see the + 'List supported node agent SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + compute nodes in the pool. This property must be specified if the compute + nodes in the pool need to have empty data disks attached to them. 
This + cannot be updated. Each node gets its own disk (the disk is not a file + share). Existing disks cannot be attached, each attached disk is empty. + When the node is removed from the pool, the disk and all data associated + with it is also deleted. The disk is not formatted after being attached, + it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the nodes which will be deployed. If omitted, no + on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the pool. + If specified, setup is performed on each node in the pool to allow tasks + to run in containers. All regular tasks and job manager tasks run on this + pool must specify the containerSettings property, and all other tasks may + specify it. 
+ :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = image_reference + self.node_agent_sku_id = node_agent_sku_id + self.windows_configuration = windows_configuration + self.data_disks = data_disks + self.license_type = license_type + self.container_configuration = container_configuration diff --git a/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration.py new file mode 100644 index 00000000..6b27533d --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration_py3.py new file mode 100644 index 00000000..40a4aedf --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/windows_configuration_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. 
+ :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = enable_automatic_updates diff --git a/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration.py b/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration.py new file mode 100644 index 00000000..a895e805 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration pools is batch and for + CloudServiceConfiguration pools is interactive. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, **kwargs): + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = kwargs.get('login_mode', None) diff --git a/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration_py3.py b/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration_py3.py new file mode 100644 index 00000000..89177928 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/models/windows_user_configuration_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration pools is batch and for + CloudServiceConfiguration pools is interactive. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, *, login_mode=None, **kwargs) -> None: + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = login_mode diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/__init__.py b/azext/generated/sdk/batch/v2018_12_01/operations/__init__.py new file mode 100644 index 00000000..5b1c54cc --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .application_operations import ApplicationOperations +from .pool_operations import PoolOperations +from .account_operations import AccountOperations +from .job_operations import JobOperations +from .certificate_operations import CertificateOperations +from .file_operations import FileOperations +from .job_schedule_operations import JobScheduleOperations +from .task_operations import TaskOperations +from .compute_node_operations import ComputeNodeOperations + +__all__ = [ + 'ApplicationOperations', + 'PoolOperations', + 'AccountOperations', + 'JobOperations', + 'CertificateOperations', + 'FileOperations', + 'JobScheduleOperations', + 'TaskOperations', + 'ComputeNodeOperations', +] diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/account_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/account_operations.py new file mode 100644 index 00000000..a2f9a08b --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/account_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class AccountOperations(object): + """AccountOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. 
Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def list_node_agent_skus( + self, account_list_node_agent_skus_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all node agent SKUs supported by the Azure Batch service. + + :param account_list_node_agent_skus_options: Additional parameters for + the operation + :type account_list_node_agent_skus_options: + ~azure.batch.models.AccountListNodeAgentSkusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeAgentSku + :rtype: + ~azure.batch.models.NodeAgentSkuPaged[~azure.batch.models.NodeAgentSku] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_node_agent_skus_options is not None: + filter = account_list_node_agent_skus_options.filter + max_results = None + if account_list_node_agent_skus_options is not None: + max_results = account_list_node_agent_skus_options.max_results + timeout = None + if account_list_node_agent_skus_options is not None: + timeout = account_list_node_agent_skus_options.timeout + client_request_id = None + if account_list_node_agent_skus_options is not None: + client_request_id = account_list_node_agent_skus_options.client_request_id + return_client_request_id = None + if account_list_node_agent_skus_options is not None: + return_client_request_id = account_list_node_agent_skus_options.return_client_request_id + ocp_date = None + if account_list_node_agent_skus_options is not None: + ocp_date = account_list_node_agent_skus_options.ocp_date + + def internal_paging(next_link=None, 
raw=False): + + if not next_link: + # Construct URL + url = self.list_node_agent_skus.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeAgentSkuPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_node_agent_skus.metadata = {'url': '/nodeagentskus'} + + def list_pool_node_counts( + self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the number of nodes in each state, grouped by pool. + + :param account_list_pool_node_counts_options: Additional parameters + for the operation + :type account_list_pool_node_counts_options: + ~azure.batch.models.AccountListPoolNodeCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of PoolNodeCounts + :rtype: + ~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_pool_node_counts_options is not None: + filter = account_list_pool_node_counts_options.filter + max_results = None + if account_list_pool_node_counts_options is not None: + max_results = account_list_pool_node_counts_options.max_results + timeout = None + if account_list_pool_node_counts_options is not None: + timeout = account_list_pool_node_counts_options.timeout + client_request_id = None + if account_list_pool_node_counts_options is not None: + client_request_id = account_list_pool_node_counts_options.client_request_id + return_client_request_id = None + if account_list_pool_node_counts_options is not None: + return_client_request_id = account_list_pool_node_counts_options.return_client_request_id + ocp_date = None + if account_list_pool_node_counts_options is not None: + ocp_date = account_list_pool_node_counts_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_pool_node_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers 
+ header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_pool_node_counts.metadata = {'url': '/nodecounts'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/application_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/application_operations.py new file mode 100644 index 00000000..c2353c69 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/application_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ApplicationOperations(object): + """ApplicationOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def list( + self, application_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the applications available in the specified account. + + This operation returns only applications and versions that are + available for use on compute nodes; that is, that can be used in an + application package reference. For administrator information about + applications and versions that are not yet available to compute nodes, + use the Azure portal or the Azure Resource Manager API. + + :param application_list_options: Additional parameters for the + operation + :type application_list_options: + ~azure.batch.models.ApplicationListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of ApplicationSummary + :rtype: + ~azure.batch.models.ApplicationSummaryPaged[~azure.batch.models.ApplicationSummary] + :raises: + :class:`BatchErrorException` + """ + max_results = None + if application_list_options is not None: + max_results = application_list_options.max_results + timeout = None + if application_list_options is not None: + timeout = application_list_options.timeout + client_request_id = None + if application_list_options is not None: + client_request_id = application_list_options.client_request_id + return_client_request_id = None + if application_list_options is not None: + return_client_request_id = application_list_options.return_client_request_id + ocp_date = None + if application_list_options is not None: + ocp_date = application_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/applications'} + + def get( + self, application_id, application_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified application. + + This operation returns only applications and versions that are + available for use on compute nodes; that is, that can be used in an + application package reference. For administrator information about + applications and versions that are not yet available to compute nodes, + use the Azure portal or the Azure Resource Manager API. + + :param application_id: The ID of the application. 
+ :type application_id: str + :param application_get_options: Additional parameters for the + operation + :type application_get_options: + ~azure.batch.models.ApplicationGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationSummary or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ApplicationSummary or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if application_get_options is not None: + timeout = application_get_options.timeout + client_request_id = None + if application_get_options is not None: + client_request_id = application_get_options.client_request_id + return_client_request_id = None + if application_get_options is not None: + return_client_request_id = application_get_options.return_client_request_id + ocp_date = None + if application_get_options is not None: + ocp_date = application_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'applicationId': self._serialize.url("application_id", application_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ApplicationSummary', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/applications/{applicationId}'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/certificate_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/certificate_operations.py new file mode 100644 index 00000000..0d069ece --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/certificate_operations.py @@ -0,0 +1,515 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class CertificateOperations(object): + """CertificateOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def add( + self, certificate, certificate_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a certificate to the specified account. + + :param certificate: The certificate to be added. + :type certificate: ~azure.batch.models.CertificateAddParameter + :param certificate_add_options: Additional parameters for the + operation + :type certificate_add_options: + ~azure.batch.models.CertificateAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_add_options is not None: + timeout = certificate_add_options.timeout + client_request_id = None + if certificate_add_options is not None: + client_request_id = certificate_add_options.client_request_id + return_client_request_id = None + if certificate_add_options is not None: + return_client_request_id = certificate_add_options.return_client_request_id + ocp_date = None + if certificate_add_options is not None: + ocp_date = certificate_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(certificate, 'CertificateAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/certificates'} + + def list( + self, certificate_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the certificates that have been added to the specified + account. + + :param certificate_list_options: Additional parameters for the + operation + :type certificate_list_options: + ~azure.batch.models.CertificateListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Certificate + :rtype: + ~azure.batch.models.CertificatePaged[~azure.batch.models.Certificate] + :raises: + :class:`BatchErrorException` + """ + filter = None + if certificate_list_options is not None: + filter = certificate_list_options.filter + select = None + if certificate_list_options is not None: + select = certificate_list_options.select + max_results = None + if certificate_list_options is not None: + max_results = certificate_list_options.max_results + timeout = None + if certificate_list_options is not None: + timeout = certificate_list_options.timeout + client_request_id = None + if certificate_list_options is not None: + client_request_id = certificate_list_options.client_request_id + return_client_request_id = None + if certificate_list_options is not None: + return_client_request_id = certificate_list_options.return_client_request_id + ocp_date = None + if certificate_list_options is not None: + ocp_date = certificate_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = 
{} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/certificates'} + + def cancel_deletion( + self, thumbprint_algorithm, thumbprint, certificate_cancel_deletion_options=None, custom_headers=None, raw=False, **operation_config): + """Cancels a failed deletion of a certificate from the specified account. + + If you try to delete a certificate that is being used by a pool or + compute node, the status of the certificate changes to deleteFailed. 
If + you decide that you want to continue using the certificate, you can use + this operation to set the status of the certificate back to active. If + you intend to delete the certificate, you do not need to run this + operation after the deletion failed. You must make sure that the + certificate is not being used by any resources, and then you can try + again to delete the certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate being deleted. + :type thumbprint: str + :param certificate_cancel_deletion_options: Additional parameters for + the operation + :type certificate_cancel_deletion_options: + ~azure.batch.models.CertificateCancelDeletionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_cancel_deletion_options is not None: + timeout = certificate_cancel_deletion_options.timeout + client_request_id = None + if certificate_cancel_deletion_options is not None: + client_request_id = certificate_cancel_deletion_options.client_request_id + return_client_request_id = None + if certificate_cancel_deletion_options is not None: + return_client_request_id = certificate_cancel_deletion_options.return_client_request_id + ocp_date = None + if certificate_cancel_deletion_options is not None: + ocp_date = certificate_cancel_deletion_options.ocp_date + + # Construct URL + url = self.cancel_deletion.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if 
return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + cancel_deletion.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete'} + + def delete( + self, thumbprint_algorithm, thumbprint, certificate_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a certificate from the specified account. + + You cannot delete a certificate if a resource (pool or compute node) is + using it. Before you can delete a certificate, you must therefore make + sure that the certificate is not associated with any existing pools, + the certificate is not installed on any compute nodes (even if you + remove a certificate from a pool, it is not removed from existing + compute nodes in that pool until they restart), and no running tasks + depend on the certificate. If you try to delete a certificate that is + in use, the deletion fails. The certificate status changes to + deleteFailed. You can use Cancel Delete Certificate to set the status + back to active if you decide that you want to continue using the + certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. 
This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate to be deleted. + :type thumbprint: str + :param certificate_delete_options: Additional parameters for the + operation + :type certificate_delete_options: + ~azure.batch.models.CertificateDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_delete_options is not None: + timeout = certificate_delete_options.timeout + client_request_id = None + if certificate_delete_options is not None: + client_request_id = certificate_delete_options.client_request_id + return_client_request_id = None + if certificate_delete_options is not None: + return_client_request_id = certificate_delete_options.return_client_request_id + ocp_date = None + if certificate_delete_options is not None: + ocp_date = certificate_delete_options.ocp_date + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + delete.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} + + def get( + self, thumbprint_algorithm, thumbprint, certificate_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate to get. 
+ :type thumbprint: str + :param certificate_get_options: Additional parameters for the + operation + :type certificate_get_options: + ~azure.batch.models.CertificateGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Certificate or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.Certificate or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if certificate_get_options is not None: + select = certificate_get_options.select + timeout = None + if certificate_get_options is not None: + timeout = certificate_get_options.timeout + client_request_id = None + if certificate_get_options is not None: + client_request_id = certificate_get_options.client_request_id + return_client_request_id = None + if certificate_get_options is not None: + return_client_request_id = certificate_get_options.return_client_request_id + ocp_date = None + if certificate_get_options is not None: + ocp_date = certificate_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + 
header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('Certificate', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/compute_node_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/compute_node_operations.py new file mode 100644 index 00000000..008b6827 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/operations/compute_node_operations.py @@ -0,0 +1,1239 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ComputeNodeOperations(object): + """ComputeNodeOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def add_user( + self, pool_id, node_id, user, compute_node_add_user_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a user account to the specified compute node. + + You can add a user account to a node only when it is in the idle or + running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a + user account. + :type node_id: str + :param user: The user account to be created. 
+ :type user: ~azure.batch.models.ComputeNodeUser + :param compute_node_add_user_options: Additional parameters for the + operation + :type compute_node_add_user_options: + ~azure.batch.models.ComputeNodeAddUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_add_user_options is not None: + timeout = compute_node_add_user_options.timeout + client_request_id = None + if compute_node_add_user_options is not None: + client_request_id = compute_node_add_user_options.client_request_id + return_client_request_id = None + if compute_node_add_user_options is not None: + return_client_request_id = compute_node_add_user_options.return_client_request_id + ocp_date = None + if compute_node_add_user_options is not None: + ocp_date = compute_node_add_user_options.ocp_date + + # Construct URL + url = self.add_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(user, 'ComputeNodeUser') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users'} + + def delete_user( + self, pool_id, node_id, user_name, compute_node_delete_user_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a user account from the specified compute node. + + You can delete a user account to a node only when it is in the idle or + running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a + user account. 
+ :type node_id: str + :param user_name: The name of the user account to delete. + :type user_name: str + :param compute_node_delete_user_options: Additional parameters for the + operation + :type compute_node_delete_user_options: + ~azure.batch.models.ComputeNodeDeleteUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_delete_user_options is not None: + timeout = compute_node_delete_user_options.timeout + client_request_id = None + if compute_node_delete_user_options is not None: + client_request_id = compute_node_delete_user_options.client_request_id + return_client_request_id = None + if compute_node_delete_user_options is not None: + return_client_request_id = compute_node_delete_user_options.return_client_request_id + ocp_date = None + if compute_node_delete_user_options is not None: + ocp_date = compute_node_delete_user_options.ocp_date + + # Construct URL + url = self.delete_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def update_user( + self, pool_id, node_id, user_name, node_update_user_parameter, compute_node_update_user_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the password and expiration time of a user account on the + specified compute node. + + This operation replaces of all the updatable properties of the account. + For example, if the expiryTime element is not specified, the current + value is replaced with the default value, not left unmodified. You can + update a user account on a node only when it is in the idle or running + state. + + :param pool_id: The ID of the pool that contains the compute node. 
+ :type pool_id: str + :param node_id: The ID of the machine on which you want to update a + user account. + :type node_id: str + :param user_name: The name of the user account to update. + :type user_name: str + :param node_update_user_parameter: The parameters for the request. + :type node_update_user_parameter: + ~azure.batch.models.NodeUpdateUserParameter + :param compute_node_update_user_options: Additional parameters for the + operation + :type compute_node_update_user_options: + ~azure.batch.models.ComputeNodeUpdateUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_update_user_options is not None: + timeout = compute_node_update_user_options.timeout + client_request_id = None + if compute_node_update_user_options is not None: + client_request_id = compute_node_update_user_options.client_request_id + return_client_request_id = None + if compute_node_update_user_options is not None: + return_client_request_id = compute_node_update_user_options.return_client_request_id + ocp_date = None + if compute_node_update_user_options is not None: + ocp_date = compute_node_update_user_options.ocp_date + + # Construct URL + url = self.update_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_update_user_parameter, 'NodeUpdateUserParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def get( + self, pool_id, node_id, compute_node_get_options=None, custom_headers=None, 
raw=False, **operation_config): + """Gets information about the specified compute node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to get + information about. + :type node_id: str + :param compute_node_get_options: Additional parameters for the + operation + :type compute_node_get_options: + ~azure.batch.models.ComputeNodeGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNode or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ComputeNode or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if compute_node_get_options is not None: + select = compute_node_get_options.select + timeout = None + if compute_node_get_options is not None: + timeout = compute_node_get_options.timeout + client_request_id = None + if compute_node_get_options is not None: + client_request_id = compute_node_get_options.client_request_id + return_client_request_id = None + if compute_node_get_options is not None: + return_client_request_id = compute_node_get_options.return_client_request_id + ocp_date = None + if compute_node_get_options is not None: + ocp_date = compute_node_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNode', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}'} + + def reboot( + self, pool_id, node_id, node_reboot_option=None, compute_node_reboot_options=None, 
custom_headers=None, raw=False, **operation_config): + """Restarts the specified compute node. + + You can restart a node only if it is in an idle or running state. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to restart. + :type node_id: str + :param node_reboot_option: When to reboot the compute node and what to + do with currently running tasks. The default value is requeue. + Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + :param compute_node_reboot_options: Additional parameters for the + operation + :type compute_node_reboot_options: + ~azure.batch.models.ComputeNodeRebootOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reboot_options is not None: + timeout = compute_node_reboot_options.timeout + client_request_id = None + if compute_node_reboot_options is not None: + client_request_id = compute_node_reboot_options.client_request_id + return_client_request_id = None + if compute_node_reboot_options is not None: + return_client_request_id = compute_node_reboot_options.return_client_request_id + ocp_date = None + if compute_node_reboot_options is not None: + ocp_date = compute_node_reboot_options.ocp_date + node_reboot_parameter = None + if node_reboot_option is not None: + node_reboot_parameter = models.NodeRebootParameter(node_reboot_option=node_reboot_option) + + # Construct URL + url = self.reboot.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not 
None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reboot_parameter is not None: + body_content = self._serialize.body(node_reboot_parameter, 'NodeRebootParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reboot.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reboot'} + + def reimage( + self, pool_id, node_id, node_reimage_option=None, compute_node_reimage_options=None, custom_headers=None, raw=False, **operation_config): + """Reinstalls the operating system on the specified compute node. + + You can reinstall the operating system on a node only if it is in an + idle or running state. This API can be invoked only on pools created + with the cloud service configuration property. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that you want to reimage. + :type node_id: str + :param node_reimage_option: When to reimage the compute node and what + to do with currently running tasks. The default value is requeue. 
+ Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + :param compute_node_reimage_options: Additional parameters for the + operation + :type compute_node_reimage_options: + ~azure.batch.models.ComputeNodeReimageOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reimage_options is not None: + timeout = compute_node_reimage_options.timeout + client_request_id = None + if compute_node_reimage_options is not None: + client_request_id = compute_node_reimage_options.client_request_id + return_client_request_id = None + if compute_node_reimage_options is not None: + return_client_request_id = compute_node_reimage_options.return_client_request_id + ocp_date = None + if compute_node_reimage_options is not None: + ocp_date = compute_node_reimage_options.ocp_date + node_reimage_parameter = None + if node_reimage_option is not None: + node_reimage_parameter = models.NodeReimageParameter(node_reimage_option=node_reimage_option) + + # Construct URL + url = self.reimage.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reimage_parameter is not None: + body_content = self._serialize.body(node_reimage_parameter, 'NodeReimageParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reimage.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reimage'} + + def disable_scheduling( + self, pool_id, node_id, node_disable_scheduling_option=None, compute_node_disable_scheduling_options=None, custom_headers=None, raw=False, 
**operation_config): + """Disables task scheduling on the specified compute node. + + You can disable task scheduling on a node only if its current + scheduling state is enabled. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node on which you want to + disable task scheduling. + :type node_id: str + :param node_disable_scheduling_option: What to do with currently + running tasks when disabling task scheduling on the compute node. The + default value is requeue. Possible values include: 'requeue', + 'terminate', 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + :param compute_node_disable_scheduling_options: Additional parameters + for the operation + :type compute_node_disable_scheduling_options: + ~azure.batch.models.ComputeNodeDisableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_disable_scheduling_options is not None: + timeout = compute_node_disable_scheduling_options.timeout + client_request_id = None + if compute_node_disable_scheduling_options is not None: + client_request_id = compute_node_disable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_disable_scheduling_options is not None: + return_client_request_id = compute_node_disable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_disable_scheduling_options is not None: + ocp_date = compute_node_disable_scheduling_options.ocp_date + node_disable_scheduling_parameter = None + if node_disable_scheduling_option is not None: + node_disable_scheduling_parameter = models.NodeDisableSchedulingParameter(node_disable_scheduling_option=node_disable_scheduling_option) + + # Construct URL + url = self.disable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_disable_scheduling_parameter is not None: + body_content = self._serialize.body(node_disable_scheduling_parameter, 'NodeDisableSchedulingParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/disablescheduling'} + + def enable_scheduling( + self, pool_id, node_id, compute_node_enable_scheduling_options=None, custom_headers=None, raw=False, **operation_config): + """Enables task scheduling on the specified compute node. + + You can enable task scheduling on a node only if its current scheduling + state is disabled. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node on which you want to enable + task scheduling. 
+ :type node_id: str + :param compute_node_enable_scheduling_options: Additional parameters + for the operation + :type compute_node_enable_scheduling_options: + ~azure.batch.models.ComputeNodeEnableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_enable_scheduling_options is not None: + timeout = compute_node_enable_scheduling_options.timeout + client_request_id = None + if compute_node_enable_scheduling_options is not None: + client_request_id = compute_node_enable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_enable_scheduling_options is not None: + return_client_request_id = compute_node_enable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_enable_scheduling_options is not None: + ocp_date = compute_node_enable_scheduling_options.ocp_date + + # Construct URL + url = self.enable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = 
str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/enablescheduling'} + + def get_remote_login_settings( + self, pool_id, node_id, compute_node_get_remote_login_settings_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the settings required for remote login to a compute node. + + Before you can remotely login to a node using the remote login + settings, you must create a user account on the node. This API can be + invoked only on pools created with the virtual machine configuration + property. For pools created with a cloud service configuration, see the + GetRemoteDesktop API. + + :param pool_id: The ID of the pool that contains the compute node. 
+ :type pool_id: str + :param node_id: The ID of the compute node for which to obtain the + remote login settings. + :type node_id: str + :param compute_node_get_remote_login_settings_options: Additional + parameters for the operation + :type compute_node_get_remote_login_settings_options: + ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNodeGetRemoteLoginSettingsResult or ClientRawResponse + if raw=true + :rtype: ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_login_settings_options is not None: + timeout = compute_node_get_remote_login_settings_options.timeout + client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + client_request_id = compute_node_get_remote_login_settings_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + return_client_request_id = compute_node_get_remote_login_settings_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_login_settings_options is not None: + ocp_date = compute_node_get_remote_login_settings_options.ocp_date + + # Construct URL + url = self.get_remote_login_settings.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNodeGetRemoteLoginSettingsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_login_settings.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/remoteloginsettings'} + + def get_remote_desktop( + self, pool_id, node_id, 
compute_node_get_remote_desktop_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Gets the Remote Desktop Protocol file for the specified compute node. + + Before you can access a node by using the RDP file, you must create a + user account on the node. This API can only be invoked on pools created + with a cloud service configuration. For pools created with a virtual + machine configuration, see the GetRemoteLoginSettings API. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node for which you want to get + the Remote Desktop Protocol file. + :type node_id: str + :param compute_node_get_remote_desktop_options: Additional parameters + for the operation + :type compute_node_get_remote_desktop_options: + ~azure.batch.models.ComputeNodeGetRemoteDesktopOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_desktop_options is not None: + timeout = compute_node_get_remote_desktop_options.timeout + client_request_id = None + if compute_node_get_remote_desktop_options is not None: + client_request_id = compute_node_get_remote_desktop_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_desktop_options is not None: + return_client_request_id = compute_node_get_remote_desktop_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_desktop_options is not None: + ocp_date = compute_node_get_remote_desktop_options.ocp_date + + # Construct URL + url = self.get_remote_desktop.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", 
client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_desktop.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/rdp'} + + def upload_batch_service_logs( + self, pool_id, node_id, upload_batch_service_logs_configuration, compute_node_upload_batch_service_logs_options=None, custom_headers=None, raw=False, **operation_config): + """Upload Azure Batch service log files from the specified compute node to + Azure Blob Storage. + + This is for gathering Azure Batch service log files in an automated + fashion from nodes if you are experiencing an error and wish to + escalate to Azure support. The Azure Batch service log files should be + shared with Azure support to aid in debugging issues with the Batch + service. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node from which you want to + upload the Azure Batch service log files. 
+ :type node_id: str + :param upload_batch_service_logs_configuration: The Azure Batch + service log files upload configuration. + :type upload_batch_service_logs_configuration: + ~azure.batch.models.UploadBatchServiceLogsConfiguration + :param compute_node_upload_batch_service_logs_options: Additional + parameters for the operation + :type compute_node_upload_batch_service_logs_options: + ~azure.batch.models.ComputeNodeUploadBatchServiceLogsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UploadBatchServiceLogsResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.UploadBatchServiceLogsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_upload_batch_service_logs_options is not None: + timeout = compute_node_upload_batch_service_logs_options.timeout + client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + client_request_id = compute_node_upload_batch_service_logs_options.client_request_id + return_client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + return_client_request_id = compute_node_upload_batch_service_logs_options.return_client_request_id + ocp_date = None + if compute_node_upload_batch_service_logs_options is not None: + ocp_date = compute_node_upload_batch_service_logs_options.ocp_date + + # Construct URL + url = self.upload_batch_service_logs.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct 
parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(upload_batch_service_logs_configuration, 'UploadBatchServiceLogsConfiguration') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('UploadBatchServiceLogsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + 
client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + upload_batch_service_logs.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs'} + + def list( + self, pool_id, compute_node_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the compute nodes in the specified pool. + + :param pool_id: The ID of the pool from which you want to list nodes. + :type pool_id: str + :param compute_node_list_options: Additional parameters for the + operation + :type compute_node_list_options: + ~azure.batch.models.ComputeNodeListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ComputeNode + :rtype: + ~azure.batch.models.ComputeNodePaged[~azure.batch.models.ComputeNode] + :raises: + :class:`BatchErrorException` + """ + filter = None + if compute_node_list_options is not None: + filter = compute_node_list_options.filter + select = None + if compute_node_list_options is not None: + select = compute_node_list_options.select + max_results = None + if compute_node_list_options is not None: + max_results = compute_node_list_options.max_results + timeout = None + if compute_node_list_options is not None: + timeout = compute_node_list_options.timeout + client_request_id = None + if compute_node_list_options is not None: + client_request_id = compute_node_list_options.client_request_id + return_client_request_id = None + if compute_node_list_options is not None: + return_client_request_id = compute_node_list_options.return_client_request_id + ocp_date = None + if compute_node_list_options is not None: + ocp_date = compute_node_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + 
if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools/{poolId}/nodes'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/file_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/file_operations.py new file mode 100644 index 00000000..eb681af1 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/file_operations.py @@ -0,0 +1,898 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class FileOperations(object): + """FileOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". 
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def delete_from_task( + self, job_id, task_id, file_path, recursive=None, file_delete_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the specified task file from the compute node where the task + ran. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to delete. + :type task_id: str + :param file_path: The path to the task file or directory that you want + to delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_task_options: Additional parameters for the + operation + :type file_delete_from_task_options: + ~azure.batch.models.FileDeleteFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_task_options is not None: + timeout = file_delete_from_task_options.timeout + client_request_id = None + if file_delete_from_task_options is not None: + client_request_id = file_delete_from_task_options.client_request_id + return_client_request_id = None + if file_delete_from_task_options is not None: + return_client_request_id = file_delete_from_task_options.return_client_request_id + ocp_date = None + if file_delete_from_task_options is not None: + ocp_date = file_delete_from_task_options.ocp_date + + # Construct URL + url = self.delete_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = 
self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_from_task( + self, job_id, task_id, file_path, file_get_from_task_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified task file. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to retrieve. + :type task_id: str + :param file_path: The path to the task file that you want to get the + content of. + :type file_path: str + :param file_get_from_task_options: Additional parameters for the + operation + :type file_get_from_task_options: + ~azure.batch.models.FileGetFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. 
+ :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_task_options is not None: + timeout = file_get_from_task_options.timeout + client_request_id = None + if file_get_from_task_options is not None: + client_request_id = file_get_from_task_options.client_request_id + return_client_request_id = None + if file_get_from_task_options is not None: + return_client_request_id = file_get_from_task_options.return_client_request_id + ocp_date = None + if file_get_from_task_options is not None: + ocp_date = file_get_from_task_options.ocp_date + ocp_range = None + if file_get_from_task_options is not None: + ocp_range = file_get_from_task_options.ocp_range + if_modified_since = None + if file_get_from_task_options is not None: + if_modified_since = file_get_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_from_task_options is not None: + if_unmodified_since = file_get_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 
'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_properties_from_task( + self, job_id, task_id, file_path, file_get_properties_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified task file. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose file you want to get the + properties of. + :type task_id: str + :param file_path: The path to the task file that you want to get the + properties of. + :type file_path: str + :param file_get_properties_from_task_options: Additional parameters + for the operation + :type file_get_properties_from_task_options: + ~azure.batch.models.FileGetPropertiesFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_task_options is not None: + timeout = file_get_properties_from_task_options.timeout + client_request_id = None + if file_get_properties_from_task_options is not None: + client_request_id = file_get_properties_from_task_options.client_request_id + return_client_request_id = None + if file_get_properties_from_task_options is not None: + return_client_request_id = file_get_properties_from_task_options.return_client_request_id + ocp_date = None + if file_get_properties_from_task_options is not None: + ocp_date = file_get_properties_from_task_options.ocp_date + if_modified_since = None + if file_get_properties_from_task_options is not None: + if_modified_since = file_get_properties_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_task_options is not None: + if_unmodified_since = file_get_properties_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def delete_from_compute_node( + self, pool_id, node_id, file_path, recursive=None, file_delete_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the 
specified file from the compute node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node from which you want to + delete the file. + :type node_id: str + :param file_path: The path to the file or directory that you want to + delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_compute_node_options: Additional parameters + for the operation + :type file_delete_from_compute_node_options: + ~azure.batch.models.FileDeleteFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_compute_node_options is not None: + timeout = file_delete_from_compute_node_options.timeout + client_request_id = None + if file_delete_from_compute_node_options is not None: + client_request_id = file_delete_from_compute_node_options.client_request_id + return_client_request_id = None + if file_delete_from_compute_node_options is not None: + return_client_request_id = file_delete_from_compute_node_options.return_client_request_id + ocp_date = None + if file_delete_from_compute_node_options is not None: + ocp_date = file_delete_from_compute_node_options.ocp_date + + # Construct URL + url = self.delete_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_from_compute_node( + self, pool_id, node_id, file_path, file_get_from_compute_node_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified compute node file. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that contains the file. + :type node_id: str + :param file_path: The path to the compute node file that you want to + get the content of. + :type file_path: str + :param file_get_from_compute_node_options: Additional parameters for + the operation + :type file_get_from_compute_node_options: + ~azure.batch.models.FileGetFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. 
The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_compute_node_options is not None: + timeout = file_get_from_compute_node_options.timeout + client_request_id = None + if file_get_from_compute_node_options is not None: + client_request_id = file_get_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_from_compute_node_options is not None: + return_client_request_id = file_get_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_from_compute_node_options is not None: + ocp_date = file_get_from_compute_node_options.ocp_date + ocp_range = None + if file_get_from_compute_node_options is not None: + ocp_range = file_get_from_compute_node_options.ocp_range + if_modified_since = None + if file_get_from_compute_node_options is not None: + if_modified_since = file_get_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_from_compute_node_options is not None: + if_unmodified_since = file_get_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 
'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_properties_from_compute_node( + self, pool_id, node_id, file_path, file_get_properties_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified compute node file. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node that contains the file. + :type node_id: str + :param file_path: The path to the compute node file that you want to + get the properties of. + :type file_path: str + :param file_get_properties_from_compute_node_options: Additional + parameters for the operation + :type file_get_properties_from_compute_node_options: + ~azure.batch.models.FileGetPropertiesFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_compute_node_options is not None: + timeout = file_get_properties_from_compute_node_options.timeout + client_request_id = None + if file_get_properties_from_compute_node_options is not None: + client_request_id = file_get_properties_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_properties_from_compute_node_options is not None: + return_client_request_id = file_get_properties_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_properties_from_compute_node_options is not None: + ocp_date = file_get_properties_from_compute_node_options.ocp_date + if_modified_since = None + if file_get_properties_from_compute_node_options is not None: + if_modified_since = file_get_properties_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_compute_node_options is not None: + if_unmodified_since = file_get_properties_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def list_from_task( + self, job_id, task_id, recursive=None, 
file_list_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the files in a task's directory on its compute node. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task whose files you want to list. + :type task_id: str + :param recursive: Whether to list children of the task directory. This + parameter can be used in combination with the filter parameter to list + specific type of files. + :type recursive: bool + :param file_list_from_task_options: Additional parameters for the + operation + :type file_list_from_task_options: + ~azure.batch.models.FileListFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_task_options is not None: + filter = file_list_from_task_options.filter + max_results = None + if file_list_from_task_options is not None: + max_results = file_list_from_task_options.max_results + timeout = None + if file_list_from_task_options is not None: + timeout = file_list_from_task_options.timeout + client_request_id = None + if file_list_from_task_options is not None: + client_request_id = file_list_from_task_options.client_request_id + return_client_request_id = None + if file_list_from_task_options is not None: + return_client_request_id = file_list_from_task_options.return_client_request_id + ocp_date = None + if file_list_from_task_options is not None: + ocp_date = file_list_from_task_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_task.metadata['url'] + path_format_arguments 
= { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files'} + + def list_from_compute_node( + self, pool_id, node_id, recursive=None, file_list_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the files in task directories on the specified compute + node. + + :param pool_id: The ID of the pool that contains the compute node. + :type pool_id: str + :param node_id: The ID of the compute node whose files you want to + list. + :type node_id: str + :param recursive: Whether to list children of a directory. + :type recursive: bool + :param file_list_from_compute_node_options: Additional parameters for + the operation + :type file_list_from_compute_node_options: + ~azure.batch.models.FileListFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_compute_node_options is not None: + filter = file_list_from_compute_node_options.filter + max_results = None + if file_list_from_compute_node_options is not None: + max_results = file_list_from_compute_node_options.max_results + timeout = None + if file_list_from_compute_node_options is not None: + timeout = file_list_from_compute_node_options.timeout + client_request_id = None + if file_list_from_compute_node_options is not None: + client_request_id = file_list_from_compute_node_options.client_request_id + return_client_request_id = None + if file_list_from_compute_node_options is not None: + return_client_request_id = file_list_from_compute_node_options.return_client_request_id + ocp_date = None + if file_list_from_compute_node_options is not None: + ocp_date = file_list_from_compute_node_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', 
maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/job_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/job_operations.py new file mode 100644 index 00000000..6ff09080 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2018_12_01/operations/job_operations.py @@ -0,0 +1,1439 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobOperations(object): + """JobOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def get_all_lifetime_statistics( + self, job_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the jobs in the specified + account. + + Statistics are aggregated across all jobs that have ever existed in the + account, from account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. 
+ + :param job_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type job_get_all_lifetime_statistics_options: + ~azure.batch.models.JobGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: JobStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.JobStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_all_lifetime_statistics_options is not None: + timeout = job_get_all_lifetime_statistics_options.timeout + client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + client_request_id = job_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + return_client_request_id = job_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if job_get_all_lifetime_statistics_options is not None: + ocp_date = job_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('JobStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimejobstats'} + + def delete( + self, job_id, job_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a job. + + Deleting a job also deletes all tasks that are part of that job, and + all job statistics. This also overrides the retention period for task + data; that is, if the job contains tasks which are still retained on + compute nodes, the Batch services deletes those tasks' working + directories and all their contents. 
When a Delete Job request is + received, the Batch service sets the job to the deleting state. All + update operations on a job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that + the job is being deleted. + + :param job_id: The ID of the job to delete. + :type job_id: str + :param job_delete_options: Additional parameters for the operation + :type job_delete_options: ~azure.batch.models.JobDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_delete_options is not None: + timeout = job_delete_options.timeout + client_request_id = None + if job_delete_options is not None: + client_request_id = job_delete_options.client_request_id + return_client_request_id = None + if job_delete_options is not None: + return_client_request_id = job_delete_options.return_client_request_id + ocp_date = None + if job_delete_options is not None: + ocp_date = job_delete_options.ocp_date + if_match = None + if job_delete_options is not None: + if_match = job_delete_options.if_match + if_none_match = None + if job_delete_options is not None: + if_none_match = job_delete_options.if_none_match + if_modified_since = None + if job_delete_options is not None: + if_modified_since = job_delete_options.if_modified_since + if_unmodified_since = None + if job_delete_options is not None: + if_unmodified_since = job_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", 
job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise 
models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobs/{jobId}'} + + def get( + self, job_id, job_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified job. + + :param job_id: The ID of the job. + :type job_id: str + :param job_get_options: Additional parameters for the operation + :type job_get_options: ~azure.batch.models.JobGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudJob or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJob or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_get_options is not None: + select = job_get_options.select + expand = None + if job_get_options is not None: + expand = job_get_options.expand + timeout = None + if job_get_options is not None: + timeout = job_get_options.timeout + client_request_id = None + if job_get_options is not None: + client_request_id = job_get_options.client_request_id + return_client_request_id = None + if job_get_options is not None: + return_client_request_id = job_get_options.return_client_request_id + ocp_date = None + if job_get_options is not None: + ocp_date = job_get_options.ocp_date + if_match = None + if job_get_options is not None: + if_match = job_get_options.if_match + if_none_match = None + if job_get_options is not None: + if_none_match = job_get_options.if_none_match + if_modified_since = None + if job_get_options is not None: + if_modified_since = job_get_options.if_modified_since + if_unmodified_since = None + if job_get_options 
is not None: + if_unmodified_since = job_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudJob', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}'} + + def patch( + self, job_id, job_patch_parameter, job_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job. + + This replaces only the job properties specified in the request. For + example, if the job has constraints, and a request does not specify the + constraints element, then the job keeps the existing constraints. + + :param job_id: The ID of the job whose properties you want to update. + :type job_id: str + :param job_patch_parameter: The parameters for the request. 
+ :type job_patch_parameter: ~azure.batch.models.JobPatchParameter + :param job_patch_options: Additional parameters for the operation + :type job_patch_options: ~azure.batch.models.JobPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_patch_options is not None: + timeout = job_patch_options.timeout + client_request_id = None + if job_patch_options is not None: + client_request_id = job_patch_options.client_request_id + return_client_request_id = None + if job_patch_options is not None: + return_client_request_id = job_patch_options.return_client_request_id + ocp_date = None + if job_patch_options is not None: + ocp_date = job_patch_options.ocp_date + if_match = None + if job_patch_options is not None: + if_match = job_patch_options.if_match + if_none_match = None + if job_patch_options is not None: + if_none_match = job_patch_options.if_none_match + if_modified_since = None + if job_patch_options is not None: + if_modified_since = job_patch_options.if_modified_since + if_unmodified_since = None + if job_patch_options is not None: + if_unmodified_since = job_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_patch_parameter, 'JobPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = 
ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/jobs/{jobId}'} + + def update( + self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job. + + This fully replaces all the updatable properties of the job. For + example, if the job has constraints associated with it and if + constraints is not specified with this request, then the Batch service + will remove the existing constraints. + + :param job_id: The ID of the job whose properties you want to update. + :type job_id: str + :param job_update_parameter: The parameters for the request. + :type job_update_parameter: ~azure.batch.models.JobUpdateParameter + :param job_update_options: Additional parameters for the operation + :type job_update_options: ~azure.batch.models.JobUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_update_options is not None: + timeout = job_update_options.timeout + client_request_id = None + if job_update_options is not None: + client_request_id = job_update_options.client_request_id + return_client_request_id = None + if job_update_options is not None: + return_client_request_id = job_update_options.return_client_request_id + ocp_date = None + if job_update_options is not None: + ocp_date = job_update_options.ocp_date + if_match = None + if job_update_options is not None: + if_match = job_update_options.if_match + if_none_match = None + if job_update_options is not None: + if_none_match = job_update_options.if_none_match + if_modified_since = None + if job_update_options is not None: + if_modified_since = job_update_options.if_modified_since + if_unmodified_since = None + if job_update_options is not None: + if_unmodified_since = job_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: 
+ header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_update_parameter, 'JobUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}'} + + def disable( + self, job_id, disable_tasks, job_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables the 
specified job, preventing new tasks from running. + + The Batch Service immediately moves the job to the disabling state. + Batch then uses the disableTasks parameter to determine what to do with + the currently running tasks of the job. The job remains in the + disabling state until the disable operation is completed and all tasks + have been dealt with according to the disableTasks option; the job then + moves to the disabled state. No new tasks are started under the job + until it moves back to active state. If you try to disable a job that + is in any state other than active, disabling, or disabled, the request + fails with status code 409. + + :param job_id: The ID of the job to disable. + :type job_id: str + :param disable_tasks: What to do with active tasks associated with the + job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + :param job_disable_options: Additional parameters for the operation + :type job_disable_options: ~azure.batch.models.JobDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_disable_options is not None: + timeout = job_disable_options.timeout + client_request_id = None + if job_disable_options is not None: + client_request_id = job_disable_options.client_request_id + return_client_request_id = None + if job_disable_options is not None: + return_client_request_id = job_disable_options.return_client_request_id + ocp_date = None + if job_disable_options is not None: + ocp_date = job_disable_options.ocp_date + if_match = None + if job_disable_options is not None: + if_match = job_disable_options.if_match + if_none_match = None + if job_disable_options is not None: + if_none_match = job_disable_options.if_none_match + if_modified_since = None + if job_disable_options is not None: + if_modified_since = job_disable_options.if_modified_since + if_unmodified_since = None + if job_disable_options is not None: + if_unmodified_since = job_disable_options.if_unmodified_since + job_disable_parameter = models.JobDisableParameter(disable_tasks=disable_tasks) + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_disable_parameter, 'JobDisableParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobs/{jobId}/disable'} + + def enable( + self, job_id, 
job_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables the specified job, allowing new tasks to run. + + When you call this API, the Batch service sets a disabled job to the + enabling state. After the this operation is completed, the job moves to + the active state, and scheduling of new tasks under the job resumes. + The Batch service does not allow a task to remain in the active state + for more than 180 days. Therefore, if you enable a job containing + active tasks which were added more than 180 days ago, those tasks will + not run. + + :param job_id: The ID of the job to enable. + :type job_id: str + :param job_enable_options: Additional parameters for the operation + :type job_enable_options: ~azure.batch.models.JobEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_enable_options is not None: + timeout = job_enable_options.timeout + client_request_id = None + if job_enable_options is not None: + client_request_id = job_enable_options.client_request_id + return_client_request_id = None + if job_enable_options is not None: + return_client_request_id = job_enable_options.return_client_request_id + ocp_date = None + if job_enable_options is not None: + ocp_date = job_enable_options.ocp_date + if_match = None + if job_enable_options is not None: + if_match = job_enable_options.if_match + if_none_match = None + if job_enable_options is not None: + if_none_match = job_enable_options.if_none_match + if_modified_since = None + if job_enable_options is not None: + if_modified_since = job_enable_options.if_modified_since + if_unmodified_since = None + if job_enable_options is not None: + if_unmodified_since = job_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobs/{jobId}/enable'} + + def terminate( + self, job_id, terminate_reason=None, job_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified job, marking it as completed. + + When a Terminate Job request is received, the Batch service sets the + job to the terminating state. 
The Batch service then terminates any + running tasks associated with the job and runs any required job release + tasks. Then the job moves into the completed state. If there are any + tasks in the job in the active state, they will remain in the active + state. Once a job is terminated, new tasks cannot be added and any + remaining active tasks will not be scheduled. + + :param job_id: The ID of the job to terminate. + :type job_id: str + :param terminate_reason: The text you want to appear as the job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + :param job_terminate_options: Additional parameters for the operation + :type job_terminate_options: ~azure.batch.models.JobTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_terminate_options is not None: + timeout = job_terminate_options.timeout + client_request_id = None + if job_terminate_options is not None: + client_request_id = job_terminate_options.client_request_id + return_client_request_id = None + if job_terminate_options is not None: + return_client_request_id = job_terminate_options.return_client_request_id + ocp_date = None + if job_terminate_options is not None: + ocp_date = job_terminate_options.ocp_date + if_match = None + if job_terminate_options is not None: + if_match = job_terminate_options.if_match + if_none_match = None + if job_terminate_options is not None: + if_none_match = job_terminate_options.if_none_match + if_modified_since = None + if job_terminate_options is not None: + if_modified_since = job_terminate_options.if_modified_since + if_unmodified_since = None + if job_terminate_options is 
not None: + if_unmodified_since = job_terminate_options.if_unmodified_since + job_terminate_parameter = None + if terminate_reason is not None: + job_terminate_parameter = models.JobTerminateParameter(terminate_reason=terminate_reason) + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + if job_terminate_parameter is not None: + body_content = self._serialize.body(job_terminate_parameter, 'JobTerminateParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/terminate'} + + def add( + self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a job to the specified account. + + The Batch service supports two ways to control the work done as part of + a job. In the first approach, the user specifies a Job Manager task. + The Batch service launches this task when it is ready to start the job. + The Job Manager task controls all other tasks that run under this job, + by using the Task APIs. In the second approach, the user directly + controls the execution of tasks under an active job, by using the Task + APIs. Also note: when naming jobs, avoid including sensitive + information such as user names or secret project names. This + information may appear in telemetry logs accessible to Microsoft + Support engineers. + + :param job: The job to be added. 
+ :type job: ~azure.batch.models.JobAddParameter + :param job_add_options: Additional parameters for the operation + :type job_add_options: ~azure.batch.models.JobAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_add_options is not None: + timeout = job_add_options.timeout + client_request_id = None + if job_add_options is not None: + client_request_id = job_add_options.client_request_id + return_client_request_id = None + if job_add_options is not None: + return_client_request_id = job_add_options.return_client_request_id + ocp_date = None + if job_add_options is not None: + ocp_date = job_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job, 'JobAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs'} + + def list( + self, job_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the jobs in the specified account. + + :param job_list_options: Additional parameters for the operation + :type job_list_options: ~azure.batch.models.JobListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_options is not None: + filter = job_list_options.filter + select = None + if job_list_options is not None: + select = job_list_options.select + expand = None + if job_list_options is not None: + expand = job_list_options.expand + max_results = None + if job_list_options is not None: + max_results = job_list_options.max_results + timeout = None + if job_list_options is not None: + timeout = job_list_options.timeout + client_request_id = None + if job_list_options is not None: + client_request_id = job_list_options.client_request_id + return_client_request_id = None + if job_list_options is not None: + return_client_request_id = job_list_options.return_client_request_id + ocp_date = None + if job_list_options is not None: + ocp_date = job_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs'} + + def list_from_job_schedule( + self, job_schedule_id, job_list_from_job_schedule_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the jobs that have been created under the specified job schedule. + + :param job_schedule_id: The ID of the job schedule from which you want + to get a list of jobs. 
+ :type job_schedule_id: str + :param job_list_from_job_schedule_options: Additional parameters for + the operation + :type job_list_from_job_schedule_options: + ~azure.batch.models.JobListFromJobScheduleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_from_job_schedule_options is not None: + filter = job_list_from_job_schedule_options.filter + select = None + if job_list_from_job_schedule_options is not None: + select = job_list_from_job_schedule_options.select + expand = None + if job_list_from_job_schedule_options is not None: + expand = job_list_from_job_schedule_options.expand + max_results = None + if job_list_from_job_schedule_options is not None: + max_results = job_list_from_job_schedule_options.max_results + timeout = None + if job_list_from_job_schedule_options is not None: + timeout = job_list_from_job_schedule_options.timeout + client_request_id = None + if job_list_from_job_schedule_options is not None: + client_request_id = job_list_from_job_schedule_options.client_request_id + return_client_request_id = None + if job_list_from_job_schedule_options is not None: + return_client_request_id = job_list_from_job_schedule_options.return_client_request_id + ocp_date = None + if job_list_from_job_schedule_options is not None: + ocp_date = job_list_from_job_schedule_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_job_schedule.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_job_schedule.metadata = {'url': '/jobschedules/{jobScheduleId}/jobs'} + + def list_preparation_and_release_task_status( + self, job_id, job_list_preparation_and_release_task_status_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the execution status of the Job Preparation and Job Release task + for the specified job across the compute nodes where the job has run. + + This API returns the Job Preparation and Job Release task status on all + compute nodes that have run the Job Preparation or Job Release task. + This includes nodes which have since been removed from the pool. If + this API is invoked on a job which has no Job Preparation or Job + Release task, the Batch service returns HTTP status code 409 (Conflict) + with an error code of JobPreparationTaskNotSpecified. + + :param job_id: The ID of the job. + :type job_id: str + :param job_list_preparation_and_release_task_status_options: + Additional parameters for the operation + :type job_list_preparation_and_release_task_status_options: + ~azure.batch.models.JobListPreparationAndReleaseTaskStatusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of + JobPreparationAndReleaseTaskExecutionInformation + :rtype: + ~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformationPaged[~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_preparation_and_release_task_status_options is not None: + filter = job_list_preparation_and_release_task_status_options.filter + select = None + if job_list_preparation_and_release_task_status_options is not None: + select = job_list_preparation_and_release_task_status_options.select + max_results = None + if job_list_preparation_and_release_task_status_options is not None: + max_results = job_list_preparation_and_release_task_status_options.max_results + timeout = None + if job_list_preparation_and_release_task_status_options is not None: + timeout = job_list_preparation_and_release_task_status_options.timeout + client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + client_request_id = job_list_preparation_and_release_task_status_options.client_request_id + return_client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + return_client_request_id = job_list_preparation_and_release_task_status_options.return_client_request_id + ocp_date = None + if job_list_preparation_and_release_task_status_options is not None: + ocp_date = job_list_preparation_and_release_task_status_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_preparation_and_release_task_status.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + 
client_raw_response = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_preparation_and_release_task_status.metadata = {'url': '/jobs/{jobId}/jobpreparationandreleasetaskstatus'} + + def get_task_counts( + self, job_id, job_get_task_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the task counts for the specified job. + + Task counts provide a count of the tasks by active, running or + completed task state, and a count of tasks which succeeded or failed. + Tasks in the preparing state are counted as running. + + :param job_id: The ID of the job. + :type job_id: str + :param job_get_task_counts_options: Additional parameters for the + operation + :type job_get_task_counts_options: + ~azure.batch.models.JobGetTaskCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskCounts or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskCounts or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_task_counts_options is not None: + timeout = job_get_task_counts_options.timeout + client_request_id = None + if job_get_task_counts_options is not None: + client_request_id = job_get_task_counts_options.client_request_id + return_client_request_id = None + if job_get_task_counts_options is not None: + return_client_request_id = job_get_task_counts_options.return_client_request_id + ocp_date = None + if job_get_task_counts_options is not None: + ocp_date = job_get_task_counts_options.ocp_date + + # Construct URL + url = self.get_task_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = 
self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskCounts', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_task_counts.metadata = {'url': '/jobs/{jobId}/taskcounts'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/job_schedule_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/job_schedule_operations.py new file mode 100644 index 00000000..a186cf13 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/job_schedule_operations.py @@ -0,0 +1,1093 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobScheduleOperations(object): + """JobScheduleOperations operations. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def exists( + self, job_schedule_id, job_schedule_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Checks the specified job schedule exists. + + :param job_schedule_id: The ID of the job schedule which you want to + check. + :type job_schedule_id: str + :param job_schedule_exists_options: Additional parameters for the + operation + :type job_schedule_exists_options: + ~azure.batch.models.JobScheduleExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_exists_options is not None: + timeout = job_schedule_exists_options.timeout + client_request_id = None + if job_schedule_exists_options is not None: + client_request_id = job_schedule_exists_options.client_request_id + return_client_request_id = None + if job_schedule_exists_options is not None: + return_client_request_id = job_schedule_exists_options.return_client_request_id + ocp_date = None + if job_schedule_exists_options is not None: + ocp_date = job_schedule_exists_options.ocp_date + if_match = None + if job_schedule_exists_options is not None: + if_match = job_schedule_exists_options.if_match + if_none_match = None + if job_schedule_exists_options is not None: + if_none_match = job_schedule_exists_options.if_none_match + if_modified_since = None + if job_schedule_exists_options is not None: + if_modified_since = job_schedule_exists_options.if_modified_since + if_unmodified_since = None + if job_schedule_exists_options is not None: + if_unmodified_since = job_schedule_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def delete( + self, job_schedule_id, job_schedule_delete_options=None, custom_headers=None, 
raw=False, **operation_config): + """Deletes a job schedule from the specified account. + + When you delete a job schedule, this also deletes all jobs and tasks + under that schedule. When tasks are deleted, all the files in their + working directories on the compute nodes are also deleted (the + retention period is ignored). The job schedule statistics are no longer + accessible once the job schedule is deleted, though they are still + counted towards account lifetime statistics. + + :param job_schedule_id: The ID of the job schedule to delete. + :type job_schedule_id: str + :param job_schedule_delete_options: Additional parameters for the + operation + :type job_schedule_delete_options: + ~azure.batch.models.JobScheduleDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_delete_options is not None: + timeout = job_schedule_delete_options.timeout + client_request_id = None + if job_schedule_delete_options is not None: + client_request_id = job_schedule_delete_options.client_request_id + return_client_request_id = None + if job_schedule_delete_options is not None: + return_client_request_id = job_schedule_delete_options.return_client_request_id + ocp_date = None + if job_schedule_delete_options is not None: + ocp_date = job_schedule_delete_options.ocp_date + if_match = None + if job_schedule_delete_options is not None: + if_match = job_schedule_delete_options.if_match + if_none_match = None + if job_schedule_delete_options is not None: + if_none_match = job_schedule_delete_options.if_none_match + if_modified_since = None + if job_schedule_delete_options is not None: + if_modified_since = 
job_schedule_delete_options.if_modified_since + if_unmodified_since = None + if job_schedule_delete_options is not None: + if_unmodified_since = job_schedule_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def get( + self, job_schedule_id, job_schedule_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified job schedule. + + :param job_schedule_id: The ID of the job schedule to get. + :type job_schedule_id: str + :param job_schedule_get_options: Additional parameters for the + operation + :type job_schedule_get_options: + ~azure.batch.models.JobScheduleGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudJobSchedule or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJobSchedule or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_schedule_get_options is not None: + select = job_schedule_get_options.select + expand = None + if job_schedule_get_options is not None: + expand = job_schedule_get_options.expand + timeout = None + if job_schedule_get_options is not None: + timeout = job_schedule_get_options.timeout + client_request_id = None + if job_schedule_get_options is not None: + client_request_id = job_schedule_get_options.client_request_id + return_client_request_id = None + if job_schedule_get_options is not None: + return_client_request_id = job_schedule_get_options.return_client_request_id + ocp_date = None + if job_schedule_get_options is not None: + ocp_date = job_schedule_get_options.ocp_date + if_match = None + if job_schedule_get_options is not None: + if_match = job_schedule_get_options.if_match + if_none_match = None + if job_schedule_get_options is not None: + if_none_match = job_schedule_get_options.if_none_match + if_modified_since = None + if job_schedule_get_options is not None: + if_modified_since = job_schedule_get_options.if_modified_since + if_unmodified_since = None + if job_schedule_get_options is not None: + if_unmodified_since = job_schedule_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 
'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} 
+ + if response.status_code == 200: + deserialized = self._deserialize('CloudJobSchedule', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def patch( + self, job_schedule_id, job_schedule_patch_parameter, job_schedule_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job schedule. + + This replaces only the job schedule properties specified in the + request. For example, if the schedule property is not specified with + this request, then the Batch service will keep the existing schedule. + Changes to a job schedule only impact jobs created by the schedule + after the update has taken place; currently running jobs are + unaffected. + + :param job_schedule_id: The ID of the job schedule to update. + :type job_schedule_id: str + :param job_schedule_patch_parameter: The parameters for the request. + :type job_schedule_patch_parameter: + ~azure.batch.models.JobSchedulePatchParameter + :param job_schedule_patch_options: Additional parameters for the + operation + :type job_schedule_patch_options: + ~azure.batch.models.JobSchedulePatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_patch_options is not None: + timeout = job_schedule_patch_options.timeout + client_request_id = None + if job_schedule_patch_options is not None: + client_request_id = job_schedule_patch_options.client_request_id + return_client_request_id = None + if job_schedule_patch_options is not None: + return_client_request_id = job_schedule_patch_options.return_client_request_id + ocp_date = None + if job_schedule_patch_options is not None: + ocp_date = job_schedule_patch_options.ocp_date + if_match = None + if job_schedule_patch_options is not None: + if_match = job_schedule_patch_options.if_match + if_none_match = None + if job_schedule_patch_options is not None: + if_none_match = job_schedule_patch_options.if_none_match + if_modified_since = None + if job_schedule_patch_options is not None: + if_modified_since = job_schedule_patch_options.if_modified_since + if_unmodified_since = None + if job_schedule_patch_options is not None: + if_unmodified_since = job_schedule_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_patch_parameter, 'JobSchedulePatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
patch.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def update( + self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified job schedule. + + This fully replaces all the updatable properties of the job schedule. + For example, if the schedule property is not specified with this + request, then the Batch service will remove the existing schedule. + Changes to a job schedule only impact jobs created by the schedule + after the update has taken place; currently running jobs are + unaffected. + + :param job_schedule_id: The ID of the job schedule to update. + :type job_schedule_id: str + :param job_schedule_update_parameter: The parameters for the request. + :type job_schedule_update_parameter: + ~azure.batch.models.JobScheduleUpdateParameter + :param job_schedule_update_options: Additional parameters for the + operation + :type job_schedule_update_options: + ~azure.batch.models.JobScheduleUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_update_options is not None: + timeout = job_schedule_update_options.timeout + client_request_id = None + if job_schedule_update_options is not None: + client_request_id = job_schedule_update_options.client_request_id + return_client_request_id = None + if job_schedule_update_options is not None: + return_client_request_id = job_schedule_update_options.return_client_request_id + ocp_date = None + if job_schedule_update_options is not None: + ocp_date = job_schedule_update_options.ocp_date + if_match = None + if job_schedule_update_options is not None: + if_match = job_schedule_update_options.if_match + if_none_match = None + if job_schedule_update_options is not None: + if_none_match = job_schedule_update_options.if_none_match + if_modified_since = None + if job_schedule_update_options is not None: + if_modified_since = job_schedule_update_options.if_modified_since + if_unmodified_since = None + if job_schedule_update_options is not None: + if_unmodified_since = job_schedule_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_update_parameter, 'JobScheduleUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
update.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def disable( + self, job_schedule_id, job_schedule_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables a job schedule. + + No new jobs will be created until the job schedule is enabled again. + + :param job_schedule_id: The ID of the job schedule to disable. + :type job_schedule_id: str + :param job_schedule_disable_options: Additional parameters for the + operation + :type job_schedule_disable_options: + ~azure.batch.models.JobScheduleDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_disable_options is not None: + timeout = job_schedule_disable_options.timeout + client_request_id = None + if job_schedule_disable_options is not None: + client_request_id = job_schedule_disable_options.client_request_id + return_client_request_id = None + if job_schedule_disable_options is not None: + return_client_request_id = job_schedule_disable_options.return_client_request_id + ocp_date = None + if job_schedule_disable_options is not None: + ocp_date = job_schedule_disable_options.ocp_date + if_match = None + if job_schedule_disable_options is not None: + if_match = job_schedule_disable_options.if_match + if_none_match = None + if job_schedule_disable_options is not None: + if_none_match = job_schedule_disable_options.if_none_match + if_modified_since = None + if job_schedule_disable_options is not None: + if_modified_since = job_schedule_disable_options.if_modified_since + if_unmodified_since = None + if job_schedule_disable_options is not None: + if_unmodified_since = 
job_schedule_disable_options.if_unmodified_since + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobschedules/{jobScheduleId}/disable'} + + def enable( + self, job_schedule_id, job_schedule_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables a job schedule. + + :param job_schedule_id: The ID of the job schedule to enable. + :type job_schedule_id: str + :param job_schedule_enable_options: Additional parameters for the + operation + :type job_schedule_enable_options: + ~azure.batch.models.JobScheduleEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_enable_options is not None: + timeout = job_schedule_enable_options.timeout + client_request_id = None + if job_schedule_enable_options is not None: + client_request_id = job_schedule_enable_options.client_request_id + return_client_request_id = None + if job_schedule_enable_options is not None: + return_client_request_id = job_schedule_enable_options.return_client_request_id + ocp_date = None + if job_schedule_enable_options is not None: + ocp_date = job_schedule_enable_options.ocp_date + if_match = None + if job_schedule_enable_options is not None: + if_match = job_schedule_enable_options.if_match + if_none_match = None + if job_schedule_enable_options is not None: + if_none_match = job_schedule_enable_options.if_none_match + if_modified_since = None + if job_schedule_enable_options is not None: + if_modified_since = job_schedule_enable_options.if_modified_since + if_unmodified_since = None + if job_schedule_enable_options is not None: + if_unmodified_since = job_schedule_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobschedules/{jobScheduleId}/enable'} + + def terminate( + self, job_schedule_id, job_schedule_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates a 
job schedule. + + :param job_schedule_id: The ID of the job schedule to terminates. + :type job_schedule_id: str + :param job_schedule_terminate_options: Additional parameters for the + operation + :type job_schedule_terminate_options: + ~azure.batch.models.JobScheduleTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_terminate_options is not None: + timeout = job_schedule_terminate_options.timeout + client_request_id = None + if job_schedule_terminate_options is not None: + client_request_id = job_schedule_terminate_options.client_request_id + return_client_request_id = None + if job_schedule_terminate_options is not None: + return_client_request_id = job_schedule_terminate_options.return_client_request_id + ocp_date = None + if job_schedule_terminate_options is not None: + ocp_date = job_schedule_terminate_options.ocp_date + if_match = None + if job_schedule_terminate_options is not None: + if_match = job_schedule_terminate_options.if_match + if_none_match = None + if job_schedule_terminate_options is not None: + if_none_match = job_schedule_terminate_options.if_none_match + if_modified_since = None + if job_schedule_terminate_options is not None: + if_modified_since = job_schedule_terminate_options.if_modified_since + if_unmodified_since = None + if job_schedule_terminate_options is not None: + if_unmodified_since = job_schedule_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobschedules/{jobScheduleId}/terminate'} + + def add( + self, cloud_job_schedule, job_schedule_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a job schedule to the specified account. + + :param cloud_job_schedule: The job schedule to be added. + :type cloud_job_schedule: ~azure.batch.models.JobScheduleAddParameter + :param job_schedule_add_options: Additional parameters for the + operation + :type job_schedule_add_options: + ~azure.batch.models.JobScheduleAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_add_options is not None: + timeout = job_schedule_add_options.timeout + client_request_id = None + if job_schedule_add_options is not None: + client_request_id = job_schedule_add_options.client_request_id + return_client_request_id = None + if job_schedule_add_options is not None: + return_client_request_id = job_schedule_add_options.return_client_request_id + ocp_date = None + if job_schedule_add_options is not None: + ocp_date = job_schedule_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date 
is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(cloud_job_schedule, 'JobScheduleAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobschedules'} + + def list( + self, job_schedule_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the job schedules in the specified account. + + :param job_schedule_list_options: Additional parameters for the + operation + :type job_schedule_list_options: + ~azure.batch.models.JobScheduleListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJobSchedule + :rtype: + ~azure.batch.models.CloudJobSchedulePaged[~azure.batch.models.CloudJobSchedule] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_schedule_list_options is not None: + filter = job_schedule_list_options.filter + select = None + if job_schedule_list_options is not None: + select = job_schedule_list_options.select + expand = None + if job_schedule_list_options is not None: + expand = job_schedule_list_options.expand + max_results = None + if job_schedule_list_options is not None: + max_results = job_schedule_list_options.max_results + timeout = None + if job_schedule_list_options is not None: + timeout = job_schedule_list_options.timeout + client_request_id = None + if job_schedule_list_options is not None: + client_request_id = job_schedule_list_options.client_request_id + return_client_request_id = None + if job_schedule_list_options is not None: + return_client_request_id = job_schedule_list_options.return_client_request_id + ocp_date = None + if job_schedule_list_options is not None: + ocp_date = job_schedule_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = 
self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobschedules'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/pool_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/pool_operations.py new file mode 100644 index 00000000..9e7253d1 
    def __init__(self, client, config, serializer, deserializer):
        # Bind the shared request pipeline pieces onto this operations group.
        # client: the service client that formats URLs and sends requests;
        # serializer/deserializer: msrest object model (de)serializers;
        # config: client configuration (batch_url, accept_language, etc.).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Client API Version. Constant value: "2018-12-01.8.0".
        self.api_version = "2018-12-01.8.0"

        self.config = config
+ + :param pool_list_usage_metrics_options: Additional parameters for the + operation + :type pool_list_usage_metrics_options: + ~azure.batch.models.PoolListUsageMetricsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of PoolUsageMetrics + :rtype: + ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics] + :raises: + :class:`BatchErrorException` + """ + start_time = None + if pool_list_usage_metrics_options is not None: + start_time = pool_list_usage_metrics_options.start_time + end_time = None + if pool_list_usage_metrics_options is not None: + end_time = pool_list_usage_metrics_options.end_time + filter = None + if pool_list_usage_metrics_options is not None: + filter = pool_list_usage_metrics_options.filter + max_results = None + if pool_list_usage_metrics_options is not None: + max_results = pool_list_usage_metrics_options.max_results + timeout = None + if pool_list_usage_metrics_options is not None: + timeout = pool_list_usage_metrics_options.timeout + client_request_id = None + if pool_list_usage_metrics_options is not None: + client_request_id = pool_list_usage_metrics_options.client_request_id + return_client_request_id = None + if pool_list_usage_metrics_options is not None: + return_client_request_id = pool_list_usage_metrics_options.return_client_request_id + ocp_date = None + if pool_list_usage_metrics_options is not None: + ocp_date = pool_list_usage_metrics_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_usage_metrics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) 
+ + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if start_time is not None: + query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601') + if end_time is not None: + query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + 
+ # Deserialize response + deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_usage_metrics.metadata = {'url': '/poolusagemetrics'} + + def get_all_lifetime_statistics( + self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the pools in the specified + account. + + Statistics are aggregated across all pools that have ever existed in + the account, from account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + + :param pool_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type pool_get_all_lifetime_statistics_options: + ~azure.batch.models.PoolGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PoolStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.PoolStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_get_all_lifetime_statistics_options is not None: + timeout = pool_get_all_lifetime_statistics_options.timeout + client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + client_request_id = pool_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if pool_get_all_lifetime_statistics_options is not None: + ocp_date = pool_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not 
None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('PoolStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'} + + def add( + self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a pool to the specified account. + + When naming pools, avoid including sensitive information such as user + names or secret project names. This information may appear in telemetry + logs accessible to Microsoft Support engineers. + + :param pool: The pool to be added. + :type pool: ~azure.batch.models.PoolAddParameter + :param pool_add_options: Additional parameters for the operation + :type pool_add_options: ~azure.batch.models.PoolAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_add_options is not None: + timeout = pool_add_options.timeout + client_request_id = None + if pool_add_options is not None: + client_request_id = pool_add_options.client_request_id + return_client_request_id = None + if pool_add_options is not None: + return_client_request_id = pool_add_options.return_client_request_id + ocp_date = None + if pool_add_options is not None: + ocp_date = pool_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool, 'PoolAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/pools'} + + def list( + self, pool_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the pools in the specified account. + + :param pool_list_options: Additional parameters for the operation + :type pool_list_options: ~azure.batch.models.PoolListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudPool + :rtype: + ~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool] + :raises: + :class:`BatchErrorException` + """ + filter = None + if pool_list_options is not None: + filter = pool_list_options.filter + select = None + if pool_list_options is not None: + select = pool_list_options.select + expand = None + if pool_list_options is not None: + expand = pool_list_options.expand + max_results = None + if pool_list_options is not None: + max_results = pool_list_options.max_results + timeout = None + if pool_list_options is not None: + timeout = pool_list_options.timeout + client_request_id = None + if pool_list_options is not None: + client_request_id = pool_list_options.client_request_id + return_client_request_id = None + if pool_list_options is not None: + return_client_request_id = pool_list_options.return_client_request_id + ocp_date = None + if pool_list_options is not None: + ocp_date = pool_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools'} + + def delete( + self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a pool from the specified account. 
+ + When you request that a pool be deleted, the following actions occur: + the pool state is set to deleting; any ongoing resize operation on the + pool are stopped; the Batch service starts resizing the pool to zero + nodes; any tasks running on existing nodes are terminated and requeued + (as if a resize pool operation had been requested with the default + requeue option); finally, the pool is removed from the system. Because + running tasks are requeued, the user can rerun these tasks by updating + their job to target a different pool. The tasks can then run on the new + pool. If you want to override the requeue behavior, then you should + call resize pool explicitly to shrink the pool to zero size before + deleting the pool. If you call an Update, Patch or Delete API on a pool + in the deleting state, it will fail with HTTP status code 409 with + error code PoolBeingDeleted. + + :param pool_id: The ID of the pool to delete. + :type pool_id: str + :param pool_delete_options: Additional parameters for the operation + :type pool_delete_options: ~azure.batch.models.PoolDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_delete_options is not None: + timeout = pool_delete_options.timeout + client_request_id = None + if pool_delete_options is not None: + client_request_id = pool_delete_options.client_request_id + return_client_request_id = None + if pool_delete_options is not None: + return_client_request_id = pool_delete_options.return_client_request_id + ocp_date = None + if pool_delete_options is not None: + ocp_date = pool_delete_options.ocp_date + if_match = None + if pool_delete_options is not None: + if_match = pool_delete_options.if_match + if_none_match = None + if pool_delete_options is not None: + if_none_match = pool_delete_options.if_none_match + if_modified_since = None + if pool_delete_options is not None: + if_modified_since = pool_delete_options.if_modified_since + if_unmodified_since = None + if pool_delete_options is not None: + if_unmodified_since = pool_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/pools/{poolId}'} + + def exists( + self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Gets basic properties of a pool. + + :param pool_id: The ID of the pool to get. 
+ :type pool_id: str + :param pool_exists_options: Additional parameters for the operation + :type pool_exists_options: ~azure.batch.models.PoolExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_exists_options is not None: + timeout = pool_exists_options.timeout + client_request_id = None + if pool_exists_options is not None: + client_request_id = pool_exists_options.client_request_id + return_client_request_id = None + if pool_exists_options is not None: + return_client_request_id = pool_exists_options.return_client_request_id + ocp_date = None + if pool_exists_options is not None: + ocp_date = pool_exists_options.ocp_date + if_match = None + if pool_exists_options is not None: + if_match = pool_exists_options.if_match + if_none_match = None + if pool_exists_options is not None: + if_none_match = pool_exists_options.if_none_match + if_modified_since = None + if pool_exists_options is not None: + if_modified_since = pool_exists_options.if_modified_since + if_unmodified_since = None + if pool_exists_options is not None: + if_unmodified_since = pool_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 
'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/pools/{poolId}'} + + def get( + self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified pool. + + :param pool_id: The ID of the pool to get. + :type pool_id: str + :param pool_get_options: Additional parameters for the operation + :type pool_get_options: ~azure.batch.models.PoolGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudPool or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudPool or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if pool_get_options is not None: + select = pool_get_options.select + expand = None + if pool_get_options is not None: + expand = pool_get_options.expand + timeout = None + if pool_get_options is not None: + timeout = pool_get_options.timeout + client_request_id = None + if pool_get_options is not None: + client_request_id = pool_get_options.client_request_id + return_client_request_id = None + if pool_get_options is not None: + return_client_request_id = pool_get_options.return_client_request_id + ocp_date = None + if pool_get_options is not None: + ocp_date = pool_get_options.ocp_date + if_match = None + if pool_get_options is not None: + if_match = pool_get_options.if_match + if_none_match = None + if pool_get_options is not None: + if_none_match = pool_get_options.if_none_match + if_modified_since = None + if pool_get_options is not None: + if_modified_since = pool_get_options.if_modified_since + if_unmodified_since = None + if pool_get_options is not None: + if_unmodified_since = pool_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudPool', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}'} + + def patch( + self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified pool. + + This only replaces the pool properties specified in the request. For + example, if the pool has a start task associated with it, and a request + does not specify a start task element, then the pool keeps the existing + start task. + + :param pool_id: The ID of the pool to update. + :type pool_id: str + :param pool_patch_parameter: The parameters for the request. + :type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter + :param pool_patch_options: Additional parameters for the operation + :type pool_patch_options: ~azure.batch.models.PoolPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_patch_options is not None: + timeout = pool_patch_options.timeout + client_request_id = None + if pool_patch_options is not None: + client_request_id = pool_patch_options.client_request_id + return_client_request_id = None + if pool_patch_options is not None: + return_client_request_id = pool_patch_options.return_client_request_id + ocp_date = None + if pool_patch_options is not None: + ocp_date = pool_patch_options.ocp_date + if_match = None + if pool_patch_options is not None: + if_match = pool_patch_options.if_match + if_none_match = None + if pool_patch_options is not None: + if_none_match = pool_patch_options.if_none_match + if_modified_since = None + if pool_patch_options is not None: + if_modified_since = pool_patch_options.if_modified_since + if_unmodified_since = None + if pool_patch_options is not None: + if_unmodified_since = pool_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not 
None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/pools/{poolId}'} + + def disable_auto_scale( + self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + 
"""Disables automatic scaling for a pool. + + :param pool_id: The ID of the pool on which to disable automatic + scaling. + :type pool_id: str + :param pool_disable_auto_scale_options: Additional parameters for the + operation + :type pool_disable_auto_scale_options: + ~azure.batch.models.PoolDisableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_disable_auto_scale_options is not None: + timeout = pool_disable_auto_scale_options.timeout + client_request_id = None + if pool_disable_auto_scale_options is not None: + client_request_id = pool_disable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_disable_auto_scale_options is not None: + return_client_request_id = pool_disable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_disable_auto_scale_options is not None: + ocp_date = pool_disable_auto_scale_options.ocp_date + + # Construct URL + url = self.disable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) 
+ if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'} + + def enable_auto_scale( + self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Enables automatic scaling for a pool. + + You cannot enable automatic scaling on a pool if a resize operation is + in progress on the pool. If automatic scaling of the pool is currently + disabled, you must specify a valid autoscale formula as part of the + request. If automatic scaling of the pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You + cannot call this API for the same pool more than once every 30 seconds. 
+ + :param pool_id: The ID of the pool on which to enable automatic + scaling. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + compute nodes in the pool. The formula is checked for validity before + it is applied to the pool. If the formula is not valid, the Batch + service rejects the request with detailed error information. For more + information about specifying this formula, see Automatically scale + compute nodes in an Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the pool size according to the autoscale formula. + The default value is 15 minutes. The minimum and maximum value are 5 + minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the + request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). If you + specify a new interval, then the existing autoscale evaluation + schedule will be stopped and a new autoscale evaluation schedule will + be started, with its starting time being the time when this request + was issued. + :type auto_scale_evaluation_interval: timedelta + :param pool_enable_auto_scale_options: Additional parameters for the + operation + :type pool_enable_auto_scale_options: + ~azure.batch.models.PoolEnableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_enable_auto_scale_options is not None: + timeout = pool_enable_auto_scale_options.timeout + client_request_id = None + if pool_enable_auto_scale_options is not None: + client_request_id = pool_enable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_enable_auto_scale_options is not None: + return_client_request_id = pool_enable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_enable_auto_scale_options is not None: + ocp_date = pool_enable_auto_scale_options.ocp_date + if_match = None + if pool_enable_auto_scale_options is not None: + if_match = pool_enable_auto_scale_options.if_match + if_none_match = None + if pool_enable_auto_scale_options is not None: + if_none_match = pool_enable_auto_scale_options.if_none_match + if_modified_since = None + if pool_enable_auto_scale_options is not None: + if_modified_since = pool_enable_auto_scale_options.if_modified_since + if_unmodified_since = None + if pool_enable_auto_scale_options is not None: + if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since + pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval) + + # Construct URL + url = self.enable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'} + + def evaluate_auto_scale( + self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the result of evaluating an automatic scaling formula on the pool. + + This API is primarily for validating an autoscale formula, as it simply + returns the result without applying the formula to the pool. The pool + must have auto scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the pool on which to evaluate the automatic + scaling formula. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + compute nodes in the pool. The formula is validated and its results + calculated, but it is not applied to the pool. To apply the formula to + the pool, 'Enable automatic scaling on a pool'. For more information + about specifying this formula, see Automatically scale compute nodes + in an Azure Batch pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param pool_evaluate_auto_scale_options: Additional parameters for the + operation + :type pool_evaluate_auto_scale_options: + ~azure.batch.models.PoolEvaluateAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: AutoScaleRun or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.AutoScaleRun or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_evaluate_auto_scale_options is not None: + timeout = pool_evaluate_auto_scale_options.timeout + client_request_id = None + if pool_evaluate_auto_scale_options is not None: + client_request_id = pool_evaluate_auto_scale_options.client_request_id + return_client_request_id = None + if pool_evaluate_auto_scale_options is not None: + return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id + ocp_date = None + if pool_evaluate_auto_scale_options is not None: + ocp_date = pool_evaluate_auto_scale_options.ocp_date + pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula) + + # Construct URL + url = self.evaluate_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 
'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('AutoScaleRun', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'} + + def resize( + self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Changes the number of compute nodes that are assigned to a pool. + + You can only resize a pool when its allocation state is steady. If the + pool is already resizing, the request fails with status code 409. When + you resize a pool, the pool's allocation state changes from steady to + resizing. You cannot resize pools which are configured for automatic + scaling. If you try to do this, the Batch service returns an error 409. 
+ If you resize a pool downwards, the Batch service chooses which nodes + to remove. To remove specific nodes, use the pool remove nodes API + instead. + + :param pool_id: The ID of the pool to resize. + :type pool_id: str + :param pool_resize_parameter: The parameters for the request. + :type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter + :param pool_resize_options: Additional parameters for the operation + :type pool_resize_options: ~azure.batch.models.PoolResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_resize_options is not None: + timeout = pool_resize_options.timeout + client_request_id = None + if pool_resize_options is not None: + client_request_id = pool_resize_options.client_request_id + return_client_request_id = None + if pool_resize_options is not None: + return_client_request_id = pool_resize_options.return_client_request_id + ocp_date = None + if pool_resize_options is not None: + ocp_date = pool_resize_options.ocp_date + if_match = None + if pool_resize_options is not None: + if_match = pool_resize_options.if_match + if_none_match = None + if pool_resize_options is not None: + if_none_match = pool_resize_options.if_none_match + if_modified_since = None + if pool_resize_options is not None: + if_modified_since = pool_resize_options.if_modified_since + if_unmodified_since = None + if pool_resize_options is not None: + if_unmodified_since = pool_resize_options.if_unmodified_since + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 
'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_resize_parameter, 'PoolResizeParameter') + + # 
Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + resize.metadata = {'url': '/pools/{poolId}/resize'} + + def stop_resize( + self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Stops an ongoing resize operation on the pool. + + This does not restore the pool to its previous state before the resize + operation: it only stops any further changes being made, and the pool + maintains its current state. After stopping, the pool stabilizes at the + number of nodes it was at when the stop operation was done. During the + stop operation, the pool allocation state changes first to stopping and + then to steady. A resize operation need not be an explicit resize pool + request; this API can also be used to halt the initial sizing of the + pool when it is created. + + :param pool_id: The ID of the pool whose resizing you want to stop. + :type pool_id: str + :param pool_stop_resize_options: Additional parameters for the + operation + :type pool_stop_resize_options: + ~azure.batch.models.PoolStopResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_stop_resize_options is not None: + timeout = pool_stop_resize_options.timeout + client_request_id = None + if pool_stop_resize_options is not None: + client_request_id = pool_stop_resize_options.client_request_id + return_client_request_id = None + if pool_stop_resize_options is not None: + return_client_request_id = pool_stop_resize_options.return_client_request_id + ocp_date = None + if pool_stop_resize_options is not None: + ocp_date = pool_stop_resize_options.ocp_date + if_match = None + if pool_stop_resize_options is not None: + if_match = pool_stop_resize_options.if_match + if_none_match = None + if pool_stop_resize_options is not None: + if_none_match = pool_stop_resize_options.if_none_match + if_modified_since = None + if pool_stop_resize_options is not None: + if_modified_since = pool_stop_resize_options.if_modified_since + if_unmodified_since = None + if pool_stop_resize_options is not None: + if_unmodified_since = pool_stop_resize_options.if_unmodified_since + + # Construct URL + url = self.stop_resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is 
not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'} + + def update_properties( + self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified pool. 
+ + This fully replaces all the updatable properties of the pool. For + example, if the pool has a start task associated with it and if start + task is not specified with this request, then the Batch service will + remove the existing start task. + + :param pool_id: The ID of the pool to update. + :type pool_id: str + :param pool_update_properties_parameter: The parameters for the + request. + :type pool_update_properties_parameter: + ~azure.batch.models.PoolUpdatePropertiesParameter + :param pool_update_properties_options: Additional parameters for the + operation + :type pool_update_properties_options: + ~azure.batch.models.PoolUpdatePropertiesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_update_properties_options is not None: + timeout = pool_update_properties_options.timeout + client_request_id = None + if pool_update_properties_options is not None: + client_request_id = pool_update_properties_options.client_request_id + return_client_request_id = None + if pool_update_properties_options is not None: + return_client_request_id = pool_update_properties_options.return_client_request_id + ocp_date = None + if pool_update_properties_options is not None: + ocp_date = pool_update_properties_options.ocp_date + + # Construct URL + url = self.update_properties.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} + + def remove_nodes( + self, pool_id, node_remove_parameter, 
pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): + """Removes compute nodes from the specified pool. + + This operation can only run when the allocation state of the pool is + steady. When this operation runs, the allocation state changes from + steady to resizing. + + :param pool_id: The ID of the pool from which you want to remove + nodes. + :type pool_id: str + :param node_remove_parameter: The parameters for the request. + :type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter + :param pool_remove_nodes_options: Additional parameters for the + operation + :type pool_remove_nodes_options: + ~azure.batch.models.PoolRemoveNodesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_remove_nodes_options is not None: + timeout = pool_remove_nodes_options.timeout + client_request_id = None + if pool_remove_nodes_options is not None: + client_request_id = pool_remove_nodes_options.client_request_id + return_client_request_id = None + if pool_remove_nodes_options is not None: + return_client_request_id = pool_remove_nodes_options.return_client_request_id + ocp_date = None + if pool_remove_nodes_options is not None: + ocp_date = pool_remove_nodes_options.ocp_date + if_match = None + if pool_remove_nodes_options is not None: + if_match = pool_remove_nodes_options.if_match + if_none_match = None + if pool_remove_nodes_options is not None: + if_none_match = pool_remove_nodes_options.if_none_match + if_modified_since = None + if pool_remove_nodes_options is not None: + if_modified_since = pool_remove_nodes_options.if_modified_since + if_unmodified_since = None + if 
pool_remove_nodes_options is not None: + if_unmodified_since = pool_remove_nodes_options.if_unmodified_since + + # Construct URL + url = self.remove_nodes.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'} diff --git a/azext/generated/sdk/batch/v2018_12_01/operations/task_operations.py b/azext/generated/sdk/batch/v2018_12_01/operations/task_operations.py new file mode 100644 index 00000000..fb1ae648 --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/operations/task_operations.py @@ -0,0 +1,1027 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class TaskOperations(object): + """TaskOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. 
+ :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2018-12-01.8.0" + + self.config = config + + def add( + self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a task to the specified job. + + The maximum lifetime of a task from addition to completion is 180 days. + If a task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the job to which the task is to be added. + :type job_id: str + :param task: The task to be added. + :type task: ~azure.batch.models.TaskAddParameter + :param task_add_options: Additional parameters for the operation + :type task_add_options: ~azure.batch.models.TaskAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_options is not None: + timeout = task_add_options.timeout + client_request_id = None + if task_add_options is not None: + client_request_id = task_add_options.client_request_id + return_client_request_id = None + if task_add_options is not None: + return_client_request_id = task_add_options.return_client_request_id + ocp_date = None + if task_add_options is not None: + ocp_date = task_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task, 'TaskAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs/{jobId}/tasks'} + + def list( + self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the tasks that are associated with the specified job. + + For multi-instance tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the job. + :type job_id: str + :param task_list_options: Additional parameters for the operation + :type task_list_options: ~azure.batch.models.TaskListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudTask + :rtype: + ~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask] + :raises: + :class:`BatchErrorException` + """ + filter = None + if task_list_options is not None: + filter = task_list_options.filter + select = None + if task_list_options is not None: + select = task_list_options.select + expand = None + if task_list_options is not None: + expand = task_list_options.expand + max_results = None + if task_list_options is not None: + max_results = task_list_options.max_results + timeout = None + if task_list_options is not None: + timeout = task_list_options.timeout + client_request_id = None + if task_list_options is not None: + client_request_id = task_list_options.client_request_id + return_client_request_id = None + if task_list_options is not None: + return_client_request_id = task_list_options.return_client_request_id + ocp_date = None + if task_list_options is not None: + ocp_date = task_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout 
is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs/{jobId}/tasks'} + + def add_collection( + self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a collection of tasks to the specified job. + + Note that each task must have a unique ID. 
The Batch service may not + return the results for each task in the same order the tasks were + submitted in this request. If the server times out or the connection is + closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the + request. Note that it is up to the user to correctly handle failures + when re-issuing a request. For example, you should use the same task + IDs during a retry so that if the prior operation succeeded, the retry + will not create extra tasks unexpectedly. If the response contains any + tasks which failed to add, a client can retry the request. In a retry, + it is most efficient to resubmit only tasks that failed to add, and to + omit tasks that were successfully added on the first attempt. The + maximum lifetime of a task from addition to completion is 180 days. If + a task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the job to which the task collection is to be + added. + :type job_id: str + :param value: The collection of tasks to add. The maximum count of + tasks is 100. The total serialized size of this collection must be + less than 1MB. If it is greater than 1MB (for example if each task has + 100's of resource files or environment variables), the request will + fail with code 'RequestBodyTooLarge' and should be retried again with + fewer tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + :param task_add_collection_options: Additional parameters for the + operation + :type task_add_collection_options: + ~azure.batch.models.TaskAddCollectionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskAddCollectionResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskAddCollectionResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_collection_options is not None: + timeout = task_add_collection_options.timeout + client_request_id = None + if task_add_collection_options is not None: + client_request_id = task_add_collection_options.client_request_id + return_client_request_id = None + if task_add_collection_options is not None: + return_client_request_id = task_add_collection_options.return_client_request_id + ocp_date = None + if task_add_collection_options is not None: + ocp_date = task_add_collection_options.ocp_date + task_collection = models.TaskAddCollectionParameter(value=value) + + # Construct URL + url = self.add_collection.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskAddCollectionResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} + + def delete( + self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a task from the specified job. + + When a task is deleted, all of the files in its directory on the + compute node where it ran are also deleted (regardless of the retention + time). For multi-instance tasks, the delete task operation applies + synchronously to the primary task; subtasks and their files are then + deleted asynchronously in the background. + + :param job_id: The ID of the job from which to delete the task. + :type job_id: str + :param task_id: The ID of the task to delete. 
+ :type task_id: str + :param task_delete_options: Additional parameters for the operation + :type task_delete_options: ~azure.batch.models.TaskDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_delete_options is not None: + timeout = task_delete_options.timeout + client_request_id = None + if task_delete_options is not None: + client_request_id = task_delete_options.client_request_id + return_client_request_id = None + if task_delete_options is not None: + return_client_request_id = task_delete_options.return_client_request_id + ocp_date = None + if task_delete_options is not None: + ocp_date = task_delete_options.ocp_date + if_match = None + if task_delete_options is not None: + if_match = task_delete_options.if_match + if_none_match = None + if task_delete_options is not None: + if_none_match = task_delete_options.if_none_match + if_modified_since = None + if task_delete_options is not None: + if_modified_since = task_delete_options.if_modified_since + if_unmodified_since = None + if task_delete_options is not None: + if_unmodified_since = task_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + 
delete.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def get( + self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified task. + + For multi-instance tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the job that contains the task. + :type job_id: str + :param task_id: The ID of the task to get information about. + :type task_id: str + :param task_get_options: Additional parameters for the operation + :type task_get_options: ~azure.batch.models.TaskGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudTask or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTask or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_get_options is not None: + select = task_get_options.select + expand = None + if task_get_options is not None: + expand = task_get_options.expand + timeout = None + if task_get_options is not None: + timeout = task_get_options.timeout + client_request_id = None + if task_get_options is not None: + client_request_id = task_get_options.client_request_id + return_client_request_id = None + if task_get_options is not None: + return_client_request_id = task_get_options.return_client_request_id + ocp_date = None + if task_get_options is not None: + ocp_date = task_get_options.ocp_date + if_match = None + if task_get_options is not None: + if_match = task_get_options.if_match + if_none_match = None + if task_get_options is not None: + if_none_match = task_get_options.if_none_match + if_modified_since = None + if task_get_options is not None: + 
if_modified_since = task_get_options.if_modified_since + if_unmodified_since = None + if task_get_options is not None: + if_unmodified_since = task_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTask', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def update( + self, job_id, task_id, constraints=None, task_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified task. + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to update. + :type task_id: str + :param constraints: Constraints that apply to this task. If omitted, + the task is given the default constraints. For multi-instance tasks, + updating the retention time applies only to the primary task and not + subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param task_update_options: Additional parameters for the operation + :type task_update_options: ~azure.batch.models.TaskUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_update_options is not None: + timeout = task_update_options.timeout + client_request_id = None + if task_update_options is not None: + client_request_id = task_update_options.client_request_id + return_client_request_id = None + if task_update_options is not None: + return_client_request_id = task_update_options.return_client_request_id + ocp_date = None + if task_update_options is not None: + ocp_date = task_update_options.ocp_date + if_match = None + if task_update_options is not None: + if_match = task_update_options.if_match + if_none_match = None + if task_update_options is not None: + if_none_match = task_update_options.if_none_match + if_modified_since = None + if task_update_options is not None: + if_modified_since = task_update_options.if_modified_since + if_unmodified_since = None + if task_update_options is not None: + if_unmodified_since = task_update_options.if_unmodified_since + task_update_parameter = models.TaskUpdateParameter(constraints=constraints) + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters 
= {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def list_subtasks( + self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the subtasks that are associated with the specified + multi-instance task. + + If the task is not a multi-instance task then this returns an empty + collection. + + :param job_id: The ID of the job. + :type job_id: str + :param task_id: The ID of the task. + :type task_id: str + :param task_list_subtasks_options: Additional parameters for the + operation + :type task_list_subtasks_options: + ~azure.batch.models.TaskListSubtasksOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTaskListSubtasksResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_list_subtasks_options is not None: + select = task_list_subtasks_options.select + timeout = None + if task_list_subtasks_options is not None: + timeout = task_list_subtasks_options.timeout + client_request_id = None + if task_list_subtasks_options is not None: + client_request_id = task_list_subtasks_options.client_request_id + return_client_request_id = None + if task_list_subtasks_options is not None: + return_client_request_id = task_list_subtasks_options.return_client_request_id + ocp_date = None + if task_list_subtasks_options is not None: + ocp_date = task_list_subtasks_options.ocp_date + + # Construct URL + url = self.list_subtasks.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTaskListSubtasksResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + list_subtasks.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'} + + def terminate( + self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified task. + + When the task has been terminated, it moves to the completed state. For + multi-instance tasks, the terminate task operation applies + synchronously to the primary task; subtasks are then terminated + asynchronously in the background. + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to terminate. 
+ :type task_id: str + :param task_terminate_options: Additional parameters for the operation + :type task_terminate_options: ~azure.batch.models.TaskTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_terminate_options is not None: + timeout = task_terminate_options.timeout + client_request_id = None + if task_terminate_options is not None: + client_request_id = task_terminate_options.client_request_id + return_client_request_id = None + if task_terminate_options is not None: + return_client_request_id = task_terminate_options.return_client_request_id + ocp_date = None + if task_terminate_options is not None: + ocp_date = task_terminate_options.ocp_date + if_match = None + if task_terminate_options is not None: + if_match = task_terminate_options.if_match + if_none_match = None + if task_terminate_options is not None: + if_none_match = task_terminate_options.if_none_match + if_modified_since = None + if task_terminate_options is not None: + if_modified_since = task_terminate_options.if_modified_since + if_unmodified_since = None + if task_terminate_options is not None: + if_unmodified_since = task_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 
'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/terminate'} + + def reactivate( + self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config): + """Reactivates a task, allowing it to run again even if its retry count + has been exhausted. + + Reactivation makes a task eligible to be retried again up to its + maximum retry count. The task's state is changed to active. As the task + is no longer in the completed state, any previous exit code or failure + information is no longer available after reactivation. Each time a task + is reactivated, its retry count is reset to 0. Reactivation will fail + for tasks that are not completed or that previously completed + successfully (with an exit code of 0). Additionally, it will fail if + the job has completed (or is terminating or deleting). + + :param job_id: The ID of the job containing the task. + :type job_id: str + :param task_id: The ID of the task to reactivate. + :type task_id: str + :param task_reactivate_options: Additional parameters for the + operation + :type task_reactivate_options: + ~azure.batch.models.TaskReactivateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_reactivate_options is not None: + timeout = task_reactivate_options.timeout + client_request_id = None + if task_reactivate_options is not None: + client_request_id = task_reactivate_options.client_request_id + return_client_request_id = None + if task_reactivate_options is not None: + return_client_request_id = task_reactivate_options.return_client_request_id + ocp_date = None + if task_reactivate_options is not None: + ocp_date = task_reactivate_options.ocp_date + if_match = None + if task_reactivate_options is not None: + if_match = task_reactivate_options.if_match + if_none_match = None + if task_reactivate_options is not None: + if_none_match = task_reactivate_options.if_none_match + if_modified_since = None + if task_reactivate_options is not None: + if_modified_since = task_reactivate_options.if_modified_since + if_unmodified_since = None + if task_reactivate_options is not None: + if_unmodified_since = task_reactivate_options.if_unmodified_since + + # Construct URL + url = self.reactivate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reactivate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/reactivate'} diff --git a/azext/generated/sdk/batch/v2018_12_01/version.py b/azext/generated/sdk/batch/v2018_12_01/version.py new file mode 100644 index 
00000000..84a99a3c --- /dev/null +++ b/azext/generated/sdk/batch/v2018_12_01/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +VERSION = "2018-12-01.8.0" + diff --git a/azext/generated/sdk/batch/v2019_06_01/__init__.py b/azext/generated/sdk/batch/v2019_06_01/__init__.py new file mode 100644 index 00000000..f27e0cb6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .batch_service_client import BatchServiceClient +from .version import VERSION + +__all__ = ['BatchServiceClient'] + +__version__ = VERSION + diff --git a/azext/generated/sdk/batch/v2019_06_01/batch_service_client.py b/azext/generated/sdk/batch/v2019_06_01/batch_service_client.py new file mode 100644 index 00000000..ec275ff0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/batch_service_client.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer +from msrestazure import AzureConfiguration +from .version import VERSION +from .operations.application_operations import ApplicationOperations +from .operations.pool_operations import PoolOperations +from .operations.account_operations import AccountOperations +from .operations.job_operations import JobOperations +from .operations.certificate_operations import CertificateOperations +from .operations.file_operations import FileOperations +from .operations.job_schedule_operations import JobScheduleOperations +from .operations.task_operations import TaskOperations +from .operations.compute_node_operations import ComputeNodeOperations +from . import models + + +class BatchServiceClientConfiguration(AzureConfiguration): + """Configuration for BatchServiceClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' + + super(BatchServiceClientConfiguration, self).__init__(base_url) + + self.add_user_agent('azure-batch/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.batch_url = batch_url + + +class BatchServiceClient(SDKClient): + """A client for issuing REST requests to the Azure Batch service. + + :ivar config: Configuration for client. + :vartype config: BatchServiceClientConfiguration + + :ivar application: Application operations + :vartype application: azure.batch.operations.ApplicationOperations + :ivar pool: Pool operations + :vartype pool: azure.batch.operations.PoolOperations + :ivar account: Account operations + :vartype account: azure.batch.operations.AccountOperations + :ivar job: Job operations + :vartype job: azure.batch.operations.JobOperations + :ivar certificate: Certificate operations + :vartype certificate: azure.batch.operations.CertificateOperations + :ivar file: File operations + :vartype file: azure.batch.operations.FileOperations + :ivar job_schedule: JobSchedule operations + :vartype job_schedule: azure.batch.operations.JobScheduleOperations + :ivar task: Task operations + :vartype task: azure.batch.operations.TaskOperations + :ivar compute_node: ComputeNode operations + :vartype compute_node: azure.batch.operations.ComputeNodeOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + self.config = BatchServiceClientConfiguration(credentials, batch_url) + super(BatchServiceClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2019-06-01.9.0' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.application = ApplicationOperations( + self._client, self.config, self._serialize, self._deserialize) + self.pool = PoolOperations( + self._client, self.config, self._serialize, self._deserialize) + self.account = AccountOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job = JobOperations( + self._client, self.config, self._serialize, self._deserialize) + self.certificate = CertificateOperations( + self._client, self.config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job_schedule = JobScheduleOperations( + self._client, self.config, self._serialize, self._deserialize) + self.task = TaskOperations( + self._client, self.config, self._serialize, self._deserialize) + self.compute_node = ComputeNodeOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/__init__.py b/azext/generated/sdk/batch/v2019_06_01/models/__init__.py new file mode 100644 index 00000000..89b0f654 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/__init__.py @@ -0,0 +1,726 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from .pool_usage_metrics_py3 import PoolUsageMetrics + from .image_reference_py3 import ImageReference + from .image_information_py3 import ImageInformation + from .authentication_token_settings_py3 import AuthenticationTokenSettings + from .usage_statistics_py3 import UsageStatistics + from .resource_statistics_py3 import ResourceStatistics + from .pool_statistics_py3 import PoolStatistics + from .job_statistics_py3 import JobStatistics + from .name_value_pair_py3 import NameValuePair + from .delete_certificate_error_py3 import DeleteCertificateError + from .certificate_py3 import Certificate + from .application_package_reference_py3 import ApplicationPackageReference + from .application_summary_py3 import ApplicationSummary + from .certificate_add_parameter_py3 import CertificateAddParameter + from .file_properties_py3 import FileProperties + from .node_file_py3 import NodeFile + from .schedule_py3 import Schedule + from .job_constraints_py3 import JobConstraints + from .job_network_configuration_py3 import JobNetworkConfiguration + from .container_registry_py3 import ContainerRegistry + from .task_container_settings_py3 import TaskContainerSettings + from .resource_file_py3 import ResourceFile + from .environment_setting_py3 import EnvironmentSetting + from .exit_options_py3 import ExitOptions + from .exit_code_mapping_py3 import ExitCodeMapping + from .exit_code_range_mapping_py3 import ExitCodeRangeMapping + from .exit_conditions_py3 import ExitConditions + from .auto_user_specification_py3 import AutoUserSpecification + from .user_identity_py3 import UserIdentity + from .linux_user_configuration_py3 import LinuxUserConfiguration + from .windows_user_configuration_py3 import WindowsUserConfiguration + from .user_account_py3 import UserAccount + from .task_constraints_py3 import 
TaskConstraints + from .output_file_blob_container_destination_py3 import OutputFileBlobContainerDestination + from .output_file_destination_py3 import OutputFileDestination + from .output_file_upload_options_py3 import OutputFileUploadOptions + from .output_file_py3 import OutputFile + from .job_manager_task_py3 import JobManagerTask + from .job_preparation_task_py3 import JobPreparationTask + from .job_release_task_py3 import JobReleaseTask + from .task_scheduling_policy_py3 import TaskSchedulingPolicy + from .start_task_py3 import StartTask + from .certificate_reference_py3 import CertificateReference + from .metadata_item_py3 import MetadataItem + from .cloud_service_configuration_py3 import CloudServiceConfiguration + from .windows_configuration_py3 import WindowsConfiguration + from .data_disk_py3 import DataDisk + from .container_configuration_py3 import ContainerConfiguration + from .virtual_machine_configuration_py3 import VirtualMachineConfiguration + from .network_security_group_rule_py3 import NetworkSecurityGroupRule + from .inbound_nat_pool_py3 import InboundNATPool + from .pool_endpoint_configuration_py3 import PoolEndpointConfiguration + from .network_configuration_py3 import NetworkConfiguration + from .pool_specification_py3 import PoolSpecification + from .auto_pool_specification_py3 import AutoPoolSpecification + from .pool_information_py3 import PoolInformation + from .job_specification_py3 import JobSpecification + from .recent_job_py3 import RecentJob + from .job_schedule_execution_information_py3 import JobScheduleExecutionInformation + from .job_schedule_statistics_py3 import JobScheduleStatistics + from .cloud_job_schedule_py3 import CloudJobSchedule + from .job_schedule_add_parameter_py3 import JobScheduleAddParameter + from .job_scheduling_error_py3 import JobSchedulingError + from .job_execution_information_py3 import JobExecutionInformation + from .cloud_job_py3 import CloudJob + from .job_add_parameter_py3 import JobAddParameter + 
from .task_container_execution_information_py3 import TaskContainerExecutionInformation + from .task_failure_information_py3 import TaskFailureInformation + from .job_preparation_task_execution_information_py3 import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information_py3 import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information_py3 import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts_py3 import TaskCounts + from .auto_scale_run_error_py3 import AutoScaleRunError + from .auto_scale_run_py3 import AutoScaleRun + from .resize_error_py3 import ResizeError + from .cloud_pool_py3 import CloudPool + from .pool_add_parameter_py3 import PoolAddParameter + from .affinity_information_py3 import AffinityInformation + from .task_execution_information_py3 import TaskExecutionInformation + from .compute_node_information_py3 import ComputeNodeInformation + from .node_agent_information_py3 import NodeAgentInformation + from .multi_instance_settings_py3 import MultiInstanceSettings + from .task_statistics_py3 import TaskStatistics + from .task_id_range_py3 import TaskIdRange + from .task_dependencies_py3 import TaskDependencies + from .cloud_task_py3 import CloudTask + from .task_add_parameter_py3 import TaskAddParameter + from .task_add_collection_parameter_py3 import TaskAddCollectionParameter + from .error_message_py3 import ErrorMessage + from .batch_error_detail_py3 import BatchErrorDetail + from .batch_error_py3 import BatchError, BatchErrorException + from .task_add_result_py3 import TaskAddResult + from .task_add_collection_result_py3 import TaskAddCollectionResult + from .subtask_information_py3 import SubtaskInformation + from .cloud_task_list_subtasks_result_py3 import CloudTaskListSubtasksResult + from .task_information_py3 import TaskInformation + from .start_task_information_py3 import StartTaskInformation + from .compute_node_error_py3 import ComputeNodeError + 
from .inbound_endpoint_py3 import InboundEndpoint + from .compute_node_endpoint_configuration_py3 import ComputeNodeEndpointConfiguration + from .compute_node_py3 import ComputeNode + from .compute_node_user_py3 import ComputeNodeUser + from .compute_node_get_remote_login_settings_result_py3 import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter_py3 import JobSchedulePatchParameter + from .job_schedule_update_parameter_py3 import JobScheduleUpdateParameter + from .job_disable_parameter_py3 import JobDisableParameter + from .job_terminate_parameter_py3 import JobTerminateParameter + from .job_patch_parameter_py3 import JobPatchParameter + from .job_update_parameter_py3 import JobUpdateParameter + from .pool_enable_auto_scale_parameter_py3 import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter_py3 import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter_py3 import PoolResizeParameter + from .pool_update_properties_parameter_py3 import PoolUpdatePropertiesParameter + from .pool_patch_parameter_py3 import PoolPatchParameter + from .task_update_parameter_py3 import TaskUpdateParameter + from .node_update_user_parameter_py3 import NodeUpdateUserParameter + from .node_reboot_parameter_py3 import NodeRebootParameter + from .node_reimage_parameter_py3 import NodeReimageParameter + from .node_disable_scheduling_parameter_py3 import NodeDisableSchedulingParameter + from .node_remove_parameter_py3 import NodeRemoveParameter + from .upload_batch_service_logs_configuration_py3 import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result_py3 import UploadBatchServiceLogsResult + from .node_counts_py3 import NodeCounts + from .pool_node_counts_py3 import PoolNodeCounts + from .application_list_options_py3 import ApplicationListOptions + from .application_get_options_py3 import ApplicationGetOptions + from .pool_list_usage_metrics_options_py3 import PoolListUsageMetricsOptions + from 
.pool_get_all_lifetime_statistics_options_py3 import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options_py3 import PoolAddOptions + from .pool_list_options_py3 import PoolListOptions + from .pool_delete_options_py3 import PoolDeleteOptions + from .pool_exists_options_py3 import PoolExistsOptions + from .pool_get_options_py3 import PoolGetOptions + from .pool_patch_options_py3 import PoolPatchOptions + from .pool_disable_auto_scale_options_py3 import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options_py3 import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options_py3 import PoolEvaluateAutoScaleOptions + from .pool_resize_options_py3 import PoolResizeOptions + from .pool_stop_resize_options_py3 import PoolStopResizeOptions + from .pool_update_properties_options_py3 import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options_py3 import PoolRemoveNodesOptions + from .account_list_supported_images_options_py3 import AccountListSupportedImagesOptions + from .account_list_pool_node_counts_options_py3 import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options_py3 import JobGetAllLifetimeStatisticsOptions + from .job_delete_options_py3 import JobDeleteOptions + from .job_get_options_py3 import JobGetOptions + from .job_patch_options_py3 import JobPatchOptions + from .job_update_options_py3 import JobUpdateOptions + from .job_disable_options_py3 import JobDisableOptions + from .job_enable_options_py3 import JobEnableOptions + from .job_terminate_options_py3 import JobTerminateOptions + from .job_add_options_py3 import JobAddOptions + from .job_list_options_py3 import JobListOptions + from .job_list_from_job_schedule_options_py3 import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options_py3 import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options_py3 import JobGetTaskCountsOptions + from .certificate_add_options_py3 import 
CertificateAddOptions + from .certificate_list_options_py3 import CertificateListOptions + from .certificate_cancel_deletion_options_py3 import CertificateCancelDeletionOptions + from .certificate_delete_options_py3 import CertificateDeleteOptions + from .certificate_get_options_py3 import CertificateGetOptions + from .file_delete_from_task_options_py3 import FileDeleteFromTaskOptions + from .file_get_from_task_options_py3 import FileGetFromTaskOptions + from .file_get_properties_from_task_options_py3 import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options_py3 import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options_py3 import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options_py3 import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options_py3 import FileListFromTaskOptions + from .file_list_from_compute_node_options_py3 import FileListFromComputeNodeOptions + from .job_schedule_exists_options_py3 import JobScheduleExistsOptions + from .job_schedule_delete_options_py3 import JobScheduleDeleteOptions + from .job_schedule_get_options_py3 import JobScheduleGetOptions + from .job_schedule_patch_options_py3 import JobSchedulePatchOptions + from .job_schedule_update_options_py3 import JobScheduleUpdateOptions + from .job_schedule_disable_options_py3 import JobScheduleDisableOptions + from .job_schedule_enable_options_py3 import JobScheduleEnableOptions + from .job_schedule_terminate_options_py3 import JobScheduleTerminateOptions + from .job_schedule_add_options_py3 import JobScheduleAddOptions + from .job_schedule_list_options_py3 import JobScheduleListOptions + from .task_add_options_py3 import TaskAddOptions + from .task_list_options_py3 import TaskListOptions + from .task_add_collection_options_py3 import TaskAddCollectionOptions + from .task_delete_options_py3 import TaskDeleteOptions + from .task_get_options_py3 import TaskGetOptions + from 
.task_update_options_py3 import TaskUpdateOptions + from .task_list_subtasks_options_py3 import TaskListSubtasksOptions + from .task_terminate_options_py3 import TaskTerminateOptions + from .task_reactivate_options_py3 import TaskReactivateOptions + from .compute_node_add_user_options_py3 import ComputeNodeAddUserOptions + from .compute_node_delete_user_options_py3 import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options_py3 import ComputeNodeUpdateUserOptions + from .compute_node_get_options_py3 import ComputeNodeGetOptions + from .compute_node_reboot_options_py3 import ComputeNodeRebootOptions + from .compute_node_reimage_options_py3 import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options_py3 import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options_py3 import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options_py3 import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options_py3 import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options_py3 import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options_py3 import ComputeNodeListOptions +except (SyntaxError, ImportError): + from .pool_usage_metrics import PoolUsageMetrics + from .image_reference import ImageReference + from .image_information import ImageInformation + from .authentication_token_settings import AuthenticationTokenSettings + from .usage_statistics import UsageStatistics + from .resource_statistics import ResourceStatistics + from .pool_statistics import PoolStatistics + from .job_statistics import JobStatistics + from .name_value_pair import NameValuePair + from .delete_certificate_error import DeleteCertificateError + from .certificate import Certificate + from .application_package_reference import ApplicationPackageReference + from .application_summary import ApplicationSummary + from 
.certificate_add_parameter import CertificateAddParameter + from .file_properties import FileProperties + from .node_file import NodeFile + from .schedule import Schedule + from .job_constraints import JobConstraints + from .job_network_configuration import JobNetworkConfiguration + from .container_registry import ContainerRegistry + from .task_container_settings import TaskContainerSettings + from .resource_file import ResourceFile + from .environment_setting import EnvironmentSetting + from .exit_options import ExitOptions + from .exit_code_mapping import ExitCodeMapping + from .exit_code_range_mapping import ExitCodeRangeMapping + from .exit_conditions import ExitConditions + from .auto_user_specification import AutoUserSpecification + from .user_identity import UserIdentity + from .linux_user_configuration import LinuxUserConfiguration + from .windows_user_configuration import WindowsUserConfiguration + from .user_account import UserAccount + from .task_constraints import TaskConstraints + from .output_file_blob_container_destination import OutputFileBlobContainerDestination + from .output_file_destination import OutputFileDestination + from .output_file_upload_options import OutputFileUploadOptions + from .output_file import OutputFile + from .job_manager_task import JobManagerTask + from .job_preparation_task import JobPreparationTask + from .job_release_task import JobReleaseTask + from .task_scheduling_policy import TaskSchedulingPolicy + from .start_task import StartTask + from .certificate_reference import CertificateReference + from .metadata_item import MetadataItem + from .cloud_service_configuration import CloudServiceConfiguration + from .windows_configuration import WindowsConfiguration + from .data_disk import DataDisk + from .container_configuration import ContainerConfiguration + from .virtual_machine_configuration import VirtualMachineConfiguration + from .network_security_group_rule import NetworkSecurityGroupRule + from .inbound_nat_pool 
import InboundNATPool + from .pool_endpoint_configuration import PoolEndpointConfiguration + from .network_configuration import NetworkConfiguration + from .pool_specification import PoolSpecification + from .auto_pool_specification import AutoPoolSpecification + from .pool_information import PoolInformation + from .job_specification import JobSpecification + from .recent_job import RecentJob + from .job_schedule_execution_information import JobScheduleExecutionInformation + from .job_schedule_statistics import JobScheduleStatistics + from .cloud_job_schedule import CloudJobSchedule + from .job_schedule_add_parameter import JobScheduleAddParameter + from .job_scheduling_error import JobSchedulingError + from .job_execution_information import JobExecutionInformation + from .cloud_job import CloudJob + from .job_add_parameter import JobAddParameter + from .task_container_execution_information import TaskContainerExecutionInformation + from .task_failure_information import TaskFailureInformation + from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts import TaskCounts + from .auto_scale_run_error import AutoScaleRunError + from .auto_scale_run import AutoScaleRun + from .resize_error import ResizeError + from .cloud_pool import CloudPool + from .pool_add_parameter import PoolAddParameter + from .affinity_information import AffinityInformation + from .task_execution_information import TaskExecutionInformation + from .compute_node_information import ComputeNodeInformation + from .node_agent_information import NodeAgentInformation + from .multi_instance_settings import MultiInstanceSettings + from .task_statistics import TaskStatistics + from .task_id_range import TaskIdRange + from .task_dependencies 
import TaskDependencies + from .cloud_task import CloudTask + from .task_add_parameter import TaskAddParameter + from .task_add_collection_parameter import TaskAddCollectionParameter + from .error_message import ErrorMessage + from .batch_error_detail import BatchErrorDetail + from .batch_error import BatchError, BatchErrorException + from .task_add_result import TaskAddResult + from .task_add_collection_result import TaskAddCollectionResult + from .subtask_information import SubtaskInformation + from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult + from .task_information import TaskInformation + from .start_task_information import StartTaskInformation + from .compute_node_error import ComputeNodeError + from .inbound_endpoint import InboundEndpoint + from .compute_node_endpoint_configuration import ComputeNodeEndpointConfiguration + from .compute_node import ComputeNode + from .compute_node_user import ComputeNodeUser + from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter import JobSchedulePatchParameter + from .job_schedule_update_parameter import JobScheduleUpdateParameter + from .job_disable_parameter import JobDisableParameter + from .job_terminate_parameter import JobTerminateParameter + from .job_patch_parameter import JobPatchParameter + from .job_update_parameter import JobUpdateParameter + from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter import PoolResizeParameter + from .pool_update_properties_parameter import PoolUpdatePropertiesParameter + from .pool_patch_parameter import PoolPatchParameter + from .task_update_parameter import TaskUpdateParameter + from .node_update_user_parameter import NodeUpdateUserParameter + from .node_reboot_parameter import NodeRebootParameter + from .node_reimage_parameter import 
NodeReimageParameter + from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter + from .node_remove_parameter import NodeRemoveParameter + from .upload_batch_service_logs_configuration import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result import UploadBatchServiceLogsResult + from .node_counts import NodeCounts + from .pool_node_counts import PoolNodeCounts + from .application_list_options import ApplicationListOptions + from .application_get_options import ApplicationGetOptions + from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions + from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options import PoolAddOptions + from .pool_list_options import PoolListOptions + from .pool_delete_options import PoolDeleteOptions + from .pool_exists_options import PoolExistsOptions + from .pool_get_options import PoolGetOptions + from .pool_patch_options import PoolPatchOptions + from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions + from .pool_resize_options import PoolResizeOptions + from .pool_stop_resize_options import PoolStopResizeOptions + from .pool_update_properties_options import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options import PoolRemoveNodesOptions + from .account_list_supported_images_options import AccountListSupportedImagesOptions + from .account_list_pool_node_counts_options import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions + from .job_delete_options import JobDeleteOptions + from .job_get_options import JobGetOptions + from .job_patch_options import JobPatchOptions + from .job_update_options import JobUpdateOptions + from .job_disable_options import JobDisableOptions + from 
.job_enable_options import JobEnableOptions + from .job_terminate_options import JobTerminateOptions + from .job_add_options import JobAddOptions + from .job_list_options import JobListOptions + from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options import JobGetTaskCountsOptions + from .certificate_add_options import CertificateAddOptions + from .certificate_list_options import CertificateListOptions + from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions + from .certificate_delete_options import CertificateDeleteOptions + from .certificate_get_options import CertificateGetOptions + from .file_delete_from_task_options import FileDeleteFromTaskOptions + from .file_get_from_task_options import FileGetFromTaskOptions + from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options import FileListFromTaskOptions + from .file_list_from_compute_node_options import FileListFromComputeNodeOptions + from .job_schedule_exists_options import JobScheduleExistsOptions + from .job_schedule_delete_options import JobScheduleDeleteOptions + from .job_schedule_get_options import JobScheduleGetOptions + from .job_schedule_patch_options import JobSchedulePatchOptions + from .job_schedule_update_options import JobScheduleUpdateOptions + from .job_schedule_disable_options import JobScheduleDisableOptions + from .job_schedule_enable_options import JobScheduleEnableOptions + from .job_schedule_terminate_options import JobScheduleTerminateOptions + from 
.job_schedule_add_options import JobScheduleAddOptions + from .job_schedule_list_options import JobScheduleListOptions + from .task_add_options import TaskAddOptions + from .task_list_options import TaskListOptions + from .task_add_collection_options import TaskAddCollectionOptions + from .task_delete_options import TaskDeleteOptions + from .task_get_options import TaskGetOptions + from .task_update_options import TaskUpdateOptions + from .task_list_subtasks_options import TaskListSubtasksOptions + from .task_terminate_options import TaskTerminateOptions + from .task_reactivate_options import TaskReactivateOptions + from .compute_node_add_user_options import ComputeNodeAddUserOptions + from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options import ComputeNodeUpdateUserOptions + from .compute_node_get_options import ComputeNodeGetOptions + from .compute_node_reboot_options import ComputeNodeRebootOptions + from .compute_node_reimage_options import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options import ComputeNodeListOptions +from .application_summary_paged import ApplicationSummaryPaged +from .pool_usage_metrics_paged import PoolUsageMetricsPaged +from .cloud_pool_paged import CloudPoolPaged +from .image_information_paged import ImageInformationPaged +from .pool_node_counts_paged import PoolNodeCountsPaged +from .cloud_job_paged import CloudJobPaged +from .job_preparation_and_release_task_execution_information_paged import 
JobPreparationAndReleaseTaskExecutionInformationPaged +from .certificate_paged import CertificatePaged +from .node_file_paged import NodeFilePaged +from .cloud_job_schedule_paged import CloudJobSchedulePaged +from .cloud_task_paged import CloudTaskPaged +from .compute_node_paged import ComputeNodePaged +from .batch_service_client_enums import ( + OSType, + VerificationType, + AccessScope, + CertificateState, + CertificateFormat, + ContainerWorkingDirectory, + JobAction, + DependencyAction, + AutoUserScope, + ElevationLevel, + LoginMode, + OutputFileUploadCondition, + ComputeNodeFillType, + CertificateStoreLocation, + CertificateVisibility, + CachingType, + StorageAccountType, + DynamicVNetAssignmentScope, + InboundEndpointProtocol, + NetworkSecurityGroupRuleAccess, + PoolLifetimeOption, + OnAllTasksComplete, + OnTaskFailure, + JobScheduleState, + ErrorCategory, + JobState, + JobPreparationTaskState, + TaskExecutionResult, + JobReleaseTaskState, + PoolState, + AllocationState, + TaskState, + TaskAddStatus, + SubtaskState, + StartTaskState, + ComputeNodeState, + SchedulingState, + DisableJobOption, + ComputeNodeDeallocationOption, + ComputeNodeRebootOption, + ComputeNodeReimageOption, + DisableComputeNodeSchedulingOption, +) + +__all__ = [ + 'PoolUsageMetrics', + 'ImageReference', + 'ImageInformation', + 'AuthenticationTokenSettings', + 'UsageStatistics', + 'ResourceStatistics', + 'PoolStatistics', + 'JobStatistics', + 'NameValuePair', + 'DeleteCertificateError', + 'Certificate', + 'ApplicationPackageReference', + 'ApplicationSummary', + 'CertificateAddParameter', + 'FileProperties', + 'NodeFile', + 'Schedule', + 'JobConstraints', + 'JobNetworkConfiguration', + 'ContainerRegistry', + 'TaskContainerSettings', + 'ResourceFile', + 'EnvironmentSetting', + 'ExitOptions', + 'ExitCodeMapping', + 'ExitCodeRangeMapping', + 'ExitConditions', + 'AutoUserSpecification', + 'UserIdentity', + 'LinuxUserConfiguration', + 'WindowsUserConfiguration', + 'UserAccount', + 
'TaskConstraints', + 'OutputFileBlobContainerDestination', + 'OutputFileDestination', + 'OutputFileUploadOptions', + 'OutputFile', + 'JobManagerTask', + 'JobPreparationTask', + 'JobReleaseTask', + 'TaskSchedulingPolicy', + 'StartTask', + 'CertificateReference', + 'MetadataItem', + 'CloudServiceConfiguration', + 'WindowsConfiguration', + 'DataDisk', + 'ContainerConfiguration', + 'VirtualMachineConfiguration', + 'NetworkSecurityGroupRule', + 'InboundNATPool', + 'PoolEndpointConfiguration', + 'NetworkConfiguration', + 'PoolSpecification', + 'AutoPoolSpecification', + 'PoolInformation', + 'JobSpecification', + 'RecentJob', + 'JobScheduleExecutionInformation', + 'JobScheduleStatistics', + 'CloudJobSchedule', + 'JobScheduleAddParameter', + 'JobSchedulingError', + 'JobExecutionInformation', + 'CloudJob', + 'JobAddParameter', + 'TaskContainerExecutionInformation', + 'TaskFailureInformation', + 'JobPreparationTaskExecutionInformation', + 'JobReleaseTaskExecutionInformation', + 'JobPreparationAndReleaseTaskExecutionInformation', + 'TaskCounts', + 'AutoScaleRunError', + 'AutoScaleRun', + 'ResizeError', + 'CloudPool', + 'PoolAddParameter', + 'AffinityInformation', + 'TaskExecutionInformation', + 'ComputeNodeInformation', + 'NodeAgentInformation', + 'MultiInstanceSettings', + 'TaskStatistics', + 'TaskIdRange', + 'TaskDependencies', + 'CloudTask', + 'TaskAddParameter', + 'TaskAddCollectionParameter', + 'ErrorMessage', + 'BatchErrorDetail', + 'BatchError', 'BatchErrorException', + 'TaskAddResult', + 'TaskAddCollectionResult', + 'SubtaskInformation', + 'CloudTaskListSubtasksResult', + 'TaskInformation', + 'StartTaskInformation', + 'ComputeNodeError', + 'InboundEndpoint', + 'ComputeNodeEndpointConfiguration', + 'ComputeNode', + 'ComputeNodeUser', + 'ComputeNodeGetRemoteLoginSettingsResult', + 'JobSchedulePatchParameter', + 'JobScheduleUpdateParameter', + 'JobDisableParameter', + 'JobTerminateParameter', + 'JobPatchParameter', + 'JobUpdateParameter', + 
'PoolEnableAutoScaleParameter', + 'PoolEvaluateAutoScaleParameter', + 'PoolResizeParameter', + 'PoolUpdatePropertiesParameter', + 'PoolPatchParameter', + 'TaskUpdateParameter', + 'NodeUpdateUserParameter', + 'NodeRebootParameter', + 'NodeReimageParameter', + 'NodeDisableSchedulingParameter', + 'NodeRemoveParameter', + 'UploadBatchServiceLogsConfiguration', + 'UploadBatchServiceLogsResult', + 'NodeCounts', + 'PoolNodeCounts', + 'ApplicationListOptions', + 'ApplicationGetOptions', + 'PoolListUsageMetricsOptions', + 'PoolGetAllLifetimeStatisticsOptions', + 'PoolAddOptions', + 'PoolListOptions', + 'PoolDeleteOptions', + 'PoolExistsOptions', + 'PoolGetOptions', + 'PoolPatchOptions', + 'PoolDisableAutoScaleOptions', + 'PoolEnableAutoScaleOptions', + 'PoolEvaluateAutoScaleOptions', + 'PoolResizeOptions', + 'PoolStopResizeOptions', + 'PoolUpdatePropertiesOptions', + 'PoolRemoveNodesOptions', + 'AccountListSupportedImagesOptions', + 'AccountListPoolNodeCountsOptions', + 'JobGetAllLifetimeStatisticsOptions', + 'JobDeleteOptions', + 'JobGetOptions', + 'JobPatchOptions', + 'JobUpdateOptions', + 'JobDisableOptions', + 'JobEnableOptions', + 'JobTerminateOptions', + 'JobAddOptions', + 'JobListOptions', + 'JobListFromJobScheduleOptions', + 'JobListPreparationAndReleaseTaskStatusOptions', + 'JobGetTaskCountsOptions', + 'CertificateAddOptions', + 'CertificateListOptions', + 'CertificateCancelDeletionOptions', + 'CertificateDeleteOptions', + 'CertificateGetOptions', + 'FileDeleteFromTaskOptions', + 'FileGetFromTaskOptions', + 'FileGetPropertiesFromTaskOptions', + 'FileDeleteFromComputeNodeOptions', + 'FileGetFromComputeNodeOptions', + 'FileGetPropertiesFromComputeNodeOptions', + 'FileListFromTaskOptions', + 'FileListFromComputeNodeOptions', + 'JobScheduleExistsOptions', + 'JobScheduleDeleteOptions', + 'JobScheduleGetOptions', + 'JobSchedulePatchOptions', + 'JobScheduleUpdateOptions', + 'JobScheduleDisableOptions', + 'JobScheduleEnableOptions', + 'JobScheduleTerminateOptions', + 
'JobScheduleAddOptions', + 'JobScheduleListOptions', + 'TaskAddOptions', + 'TaskListOptions', + 'TaskAddCollectionOptions', + 'TaskDeleteOptions', + 'TaskGetOptions', + 'TaskUpdateOptions', + 'TaskListSubtasksOptions', + 'TaskTerminateOptions', + 'TaskReactivateOptions', + 'ComputeNodeAddUserOptions', + 'ComputeNodeDeleteUserOptions', + 'ComputeNodeUpdateUserOptions', + 'ComputeNodeGetOptions', + 'ComputeNodeRebootOptions', + 'ComputeNodeReimageOptions', + 'ComputeNodeDisableSchedulingOptions', + 'ComputeNodeEnableSchedulingOptions', + 'ComputeNodeGetRemoteLoginSettingsOptions', + 'ComputeNodeGetRemoteDesktopOptions', + 'ComputeNodeUploadBatchServiceLogsOptions', + 'ComputeNodeListOptions', + 'ApplicationSummaryPaged', + 'PoolUsageMetricsPaged', + 'CloudPoolPaged', + 'ImageInformationPaged', + 'PoolNodeCountsPaged', + 'CloudJobPaged', + 'JobPreparationAndReleaseTaskExecutionInformationPaged', + 'CertificatePaged', + 'NodeFilePaged', + 'CloudJobSchedulePaged', + 'CloudTaskPaged', + 'ComputeNodePaged', + 'OSType', + 'VerificationType', + 'AccessScope', + 'CertificateState', + 'CertificateFormat', + 'ContainerWorkingDirectory', + 'JobAction', + 'DependencyAction', + 'AutoUserScope', + 'ElevationLevel', + 'LoginMode', + 'OutputFileUploadCondition', + 'ComputeNodeFillType', + 'CertificateStoreLocation', + 'CertificateVisibility', + 'CachingType', + 'StorageAccountType', + 'DynamicVNetAssignmentScope', + 'InboundEndpointProtocol', + 'NetworkSecurityGroupRuleAccess', + 'PoolLifetimeOption', + 'OnAllTasksComplete', + 'OnTaskFailure', + 'JobScheduleState', + 'ErrorCategory', + 'JobState', + 'JobPreparationTaskState', + 'TaskExecutionResult', + 'JobReleaseTaskState', + 'PoolState', + 'AllocationState', + 'TaskState', + 'TaskAddStatus', + 'SubtaskState', + 'StartTaskState', + 'ComputeNodeState', + 'SchedulingState', + 'DisableJobOption', + 'ComputeNodeDeallocationOption', + 'ComputeNodeRebootOption', + 'ComputeNodeReimageOption', + 'DisableComputeNodeSchedulingOption', +] 
diff --git a/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options.py b/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options.py new file mode 100644 index 00000000..4ad2da01 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. + :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 10) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options_py3.py new file mode 100644 index 00000000..e9f0d02b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/account_list_pool_node_counts_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. + :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options.py b/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options.py new file mode 100644 index 00000000..d6fdedf2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListSupportedImagesOptions(Model): + """Additional parameters for list_supported_images operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListSupportedImagesOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options_py3.py new file mode 100644 index 00000000..35d60b10 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/account_list_supported_images_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListSupportedImagesOptions(Model): + """Additional parameters for list_supported_images operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListSupportedImagesOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/affinity_information.py b/azext/generated/sdk/batch/v2019_06_01/models/affinity_information.py new file mode 100644 index 00000000..c63658ec --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/affinity_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a Compute + Node on which to start a Task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a Compute Node or a Task that has run previously. You can pass the + affinityId of a Node to indicate that this Task needs to run on that + Compute Node. Note that this is just a soft affinity. If the target + Compute Node is busy or unavailable at the time the Task is scheduled, + then the Task will be scheduled elsewhere. 
+ :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/affinity_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/affinity_information_py3.py new file mode 100644 index 00000000..98463bb2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/affinity_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a Compute + Node on which to start a Task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a Compute Node or a Task that has run previously. You can pass the + affinityId of a Node to indicate that this Task needs to run on that + Compute Node. Note that this is just a soft affinity. If the target + Compute Node is busy or unavailable at the time the Task is scheduled, + then the Task will be scheduled elsewhere. 
+ :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str, **kwargs) -> None: + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/application_get_options.py new file mode 100644 index 00000000..038c5421 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_get_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/application_get_options_py3.py new file mode 100644 index 00000000..3c9d5c0a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_get_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/application_list_options.py new file mode 100644 index 00000000..bc3ddb36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_list_options.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. 
+ + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/application_list_options_py3.py new file mode 100644 index 00000000..445de51e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_list_options_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. + + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference.py b/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference.py new file mode 100644 index 00000000..52df2028 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an Package to be deployed to Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. If omitted, the + default version is deployed. 
If this is omitted on a Pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a Task, and no default version is specified for this + application, the Task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.version = kwargs.get('version', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference_py3.py new file mode 100644 index 00000000..0c034391 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_package_reference_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an Package to be deployed to Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. 
If omitted, the + default version is deployed. If this is omitted on a Pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a Task, and no default version is specified for this + application, the Task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None: + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = application_id + self.version = version diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_summary.py b/azext/generated/sdk/batch/v2019_06_01/models/application_summary.py new file mode 100644 index 00000000..9f6bc127 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_summary.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch Account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the Account. + :type id: str + :param display_name: Required. 
The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. + :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(ApplicationSummary, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.versions = kwargs.get('versions', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_summary_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/application_summary_paged.py new file mode 100644 index 00000000..64ed9c6b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_summary_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ApplicationSummaryPaged(Paged): + """ + A paging container for iterating over a list of :class:`ApplicationSummary ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ApplicationSummary]'} + } + + def __init__(self, *args, **kwargs): + + super(ApplicationSummaryPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/application_summary_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/application_summary_py3.py new file mode 100644 index 00000000..c2fa3677 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/application_summary_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch Account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the Account. + :type id: str + :param display_name: Required. The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. 
+ :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None: + super(ApplicationSummary, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.versions = versions diff --git a/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings.py b/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings.py new file mode 100644 index 00000000..f3a552a6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the Task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the Job which + contains the Task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, **kwargs): + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = kwargs.get('access', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings_py3.py new file mode 100644 index 00000000..ee605c67 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/authentication_token_settings_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the Task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the Job which + contains the Task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, *, access=None, **kwargs) -> None: + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = access diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification.py new file mode 100644 index 00000000..2972c8cf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto Pool when the Job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a Pool is automatically created. The Batch service assigns each auto + Pool a unique identifier on creation. To distinguish between Pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto Pools, and how multiple Jobs on a schedule are assigned to Pools. 
+ Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto Pool alive after its lifetime + expires. If false, the Batch service deletes the Pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the Job or Job Schedule completes. If true, the Batch service does not + delete the Pool automatically. It is up to the user to delete auto Pools + created with this option. + :type keep_alive: bool + :param pool: The Pool specification for the auto Pool. + :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, **kwargs): + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None) + self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None) + self.keep_alive = kwargs.get('keep_alive', None) + self.pool = kwargs.get('pool', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification_py3.py new file mode 100644 index 00000000..34b01b40 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_pool_specification_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto Pool when the Job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a Pool is automatically created. The Batch service assigns each auto + Pool a unique identifier on creation. To distinguish between Pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto Pool alive after its lifetime + expires. If false, the Batch service deletes the Pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the Job or Job Schedule completes. If true, the Batch service does not + delete the Pool automatically. It is up to the user to delete auto Pools + created with this option. + :type keep_alive: bool + :param pool: The Pool specification for the auto Pool. 
+ :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None: + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = auto_pool_id_prefix + self.pool_lifetime_option = pool_lifetime_option + self.keep_alive = keep_alive + self.pool = pool diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run.py new file mode 100644 index 00000000..28e11e8d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a Pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. 
Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. + :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the Pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = kwargs.get('timestamp', None) + self.results = kwargs.get('results', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error.py new file mode 100644 index 00000000..542fe623 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a Pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error_py3.py new file mode 100644 index 00000000..62b79622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_error_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a Pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. 
+ :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_py3.py new file mode 100644 index 00000000..7607b04f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_scale_run_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a Pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. 
+ :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the Pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None: + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = timestamp + self.results = results + self.error = error diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification.py new file mode 100644 index 00000000..c8f791e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a Task on the Batch + service. + + :param scope: The scope for the auto user. The default value is Task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, **kwargs): + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = kwargs.get('scope', None) + self.elevation_level = kwargs.get('elevation_level', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification_py3.py new file mode 100644 index 00000000..c586947f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/auto_user_specification_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a Task on the Batch + service. + + :param scope: The scope for the auto user. The default value is Task. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. 
Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = scope + self.elevation_level = elevation_level diff --git a/azext/generated/sdk/batch/v2019_06_01/models/batch_error.py b/azext/generated/sdk/batch/v2019_06_01/models/batch_error.py new file mode 100644 index 00000000..3857ac96 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/batch_error.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, **kwargs): + super(BatchError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail.py b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail.py new file mode 100644 index 00000000..a892678c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail_py3.py new file mode 100644 index 00000000..8aa8a85b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_detail_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/batch_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_py3.py new file mode 100644 index 00000000..a6e49569 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/batch_error_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None: + super(BatchError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/batch_service_client_enums.py b/azext/generated/sdk/batch/v2019_06_01/models/batch_service_client_enums.py new file mode 100644 index 00000000..fb65370b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/batch_service_client_enums.py @@ -0,0 +1,300 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class OSType(str, Enum): + + linux = "linux" #: The Linux operating system. + windows = "windows" #: The Windows operating system. + + +class VerificationType(str, Enum): + + verified = "verified" #: The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected. 
+ unverified = "unverified" #: The associated Compute Node agent SKU should have binary compatibility with the Image, but specific functionality has not been verified. + + +class AccessScope(str, Enum): + + job = "job" #: Grants access to perform all operations on the Job containing the Task. + + +class CertificateState(str, Enum): + + active = "active" #: The Certificate is available for use in Pools. + deleting = "deleting" #: The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools. + delete_failed = "deletefailed" #: The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete. + + +class CertificateFormat(str, Enum): + + pfx = "pfx" #: The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain. + cer = "cer" #: The Certificate is a base64-encoded X.509 Certificate. + + +class ContainerWorkingDirectory(str, Enum): + + task_working_directory = "taskWorkingDirectory" #: Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch. + container_image_default = "containerImageDefault" #: Use the working directory defined in the container Image. Beware that this directory will not contain the Resource Files downloaded by Batch. + + +class JobAction(str, Enum): + + none = "none" #: Take no action. + disable = "disable" #: Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue. 
+ terminate = "terminate" #: Terminate the Job. The terminateReason in the Job's executionInfo is set to "TaskFailed". + + +class DependencyAction(str, Enum): + + satisfy = "satisfy" #: Satisfy the Task's dependencies. + block = "block" #: Block the Task's dependencies. + + +class AutoUserScope(str, Enum): + + task = "task" #: Specifies that the service should create a new user for the Task. + pool = "pool" #: Specifies that the Task runs as the common auto user Account which is created on every Compute Node in a Pool. + + +class ElevationLevel(str, Enum): + + non_admin = "nonadmin" #: The user is a standard user without elevated access. + admin = "admin" #: The user is a user with elevated access and operates with full Administrator permissions. + + +class LoginMode(str, Enum): + + batch = "batch" #: The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. + interactive = "interactive" #: The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed by the Task command line is configured to always require administrative privilege or to always require maximum privilege. + + +class OutputFileUploadCondition(str, Enum): + + task_success = "tasksuccess" #: Upload the file(s) only after the Task process exits with an exit code of 0. + task_failure = "taskfailure" #: Upload the file(s) only after the Task process exits with a nonzero exit code. + task_completion = "taskcompletion" #: Upload the file(s) after the Task process exits, no matter what the exit code was. + + +class ComputeNodeFillType(str, Enum): + + spread = "spread" #: Tasks should be assigned evenly across all Compute Nodes in the Pool. 
+ pack = "pack" #: As many Tasks as possible (maxTasksPerNode) should be assigned to each Compute Node in the Pool before any Tasks are assigned to the next Compute Node in the Pool. + + +class CertificateStoreLocation(str, Enum): + + current_user = "currentuser" #: Certificates should be installed to the CurrentUser Certificate store. + local_machine = "localmachine" #: Certificates should be installed to the LocalMachine Certificate store. + + +class CertificateVisibility(str, Enum): + + start_task = "starttask" #: The Certificate should be visible to the user Account under which the start Task is run. + task = "task" #: The Certificate should be visible to the user Accounts under which Job Tasks are run. + remote_user = "remoteuser" #: The Certificate should be visible to the user Accounts under which users remotely access the Compute Node. + + +class CachingType(str, Enum): + + none = "none" #: The caching mode for the disk is not enabled. + read_only = "readonly" #: The caching mode for the disk is read only. + read_write = "readwrite" #: The caching mode for the disk is read and write. + + +class StorageAccountType(str, Enum): + + standard_lrs = "standard_lrs" #: The data disk should use standard locally redundant storage. + premium_lrs = "premium_lrs" #: The data disk should use premium locally redundant storage. + + +class DynamicVNetAssignmentScope(str, Enum): + + none = "none" #: No dynamic VNet assignment is enabled. + job = "job" #: Dynamic VNet assignment is done per-job. + + +class InboundEndpointProtocol(str, Enum): + + tcp = "tcp" #: Use TCP for the endpoint. + udp = "udp" #: Use UDP for the endpoint. + + +class NetworkSecurityGroupRuleAccess(str, Enum): + + allow = "allow" #: Allow access. + deny = "deny" #: Deny access. + + +class PoolLifetimeOption(str, Enum): + + job_schedule = "jobschedule" #: The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when it creates the first Job on the schedule. 
You may apply this option only to Job Schedules, not to Jobs. + job = "job" #: The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates the Pool when it creates the Job. If the 'job' option is applied to a Job Schedule, the Batch service creates a new auto Pool for every Job created on the schedule. + + +class OnAllTasksComplete(str, Enum): + + no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. + terminate_job = "terminatejob" #: Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'. + + +class OnTaskFailure(str, Enum): + + no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. + perform_exit_options_job_action = "performexitoptionsjobaction" #: Take the action associated with the Task exit condition in the Task's exitConditions collection. (This may still result in no action being taken, if that is what the Task specifies.) + + +class JobScheduleState(str, Enum): + + active = "active" #: The Job Schedule is active and will create Jobs as per its schedule. + completed = "completed" #: The Job Schedule has terminated, either by reaching its end time or by the user terminating it explicitly. + disabled = "disabled" #: The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs will on this schedule, but any existing active Job will continue to run. + terminating = "terminating" #: The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, nor is any existing Job active. + deleting = "deleting" #: The user has requested that the Job Schedule be deleted, but the delete operation is still in progress. 
The scheduler will not initiate any new Jobs for this Job Schedule, and will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted. + + +class ErrorCategory(str, Enum): + + user_error = "usererror" #: The error is due to a user issue, such as misconfiguration. + server_error = "servererror" #: The error is due to an internal server issue. + + +class JobState(str, Enum): + + active = "active" #: The Job is available to have Tasks scheduled. + disabling = "disabling" #: A user has requested that the Job be disabled, but the disable operation is still in progress (for example, waiting for Tasks to terminate). + disabled = "disabled" #: A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled. + enabling = "enabling" #: A user has requested that the Job be enabled, but the enable operation is still in progress. + terminating = "terminating" #: The Job is about to complete, either because a Job Manager Task has completed or because the user has terminated the Job, but the terminate operation is still in progress (for example, because Job Release Tasks are running). + completed = "completed" #: All Tasks have terminated, and the system will not accept any more Tasks or any further changes to the Job. + deleting = "deleting" #: A user has requested that the Job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running Tasks). + + +class JobPreparationTaskState(str, Enum): + + running = "running" #: The Task is currently running (including retrying). + completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). 
+ + +class TaskExecutionResult(str, Enum): + + success = "success" #: The Task ran successfully. + failure = "failure" #: There was an error during processing of the Task. The failure may have occurred before the Task process was launched, while the Task process was executing, or after the Task process exited. + + +class JobReleaseTaskState(str, Enum): + + running = "running" #: The Task is currently running (including retrying). + completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). + + +class PoolState(str, Enum): + + active = "active" #: The Pool is available to run Tasks subject to the availability of Compute Nodes. + deleting = "deleting" #: The user has requested that the Pool be deleted, but the delete operation has not yet completed. + + +class AllocationState(str, Enum): + + steady = "steady" #: The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes. + resizing = "resizing" #: The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool. + stopping = "stopping" #: The Pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed. + + +class TaskState(str, Enum): + + active = "active" #: The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run. + preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. 
If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + running = "running" #: The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. + + +class TaskAddStatus(str, Enum): + + success = "success" #: The Task was added successfully. + client_error = "clienterror" #: The Task failed to add due to a client error and should not be retried without modifying the request as appropriate. + server_error = "servererror" #: Task failed to add due to a server error and can be retried without modification. + + +class SubtaskState(str, Enum): + + preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + running = "running" #: The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. 
A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. + + +class StartTaskState(str, Enum): + + running = "running" #: The start Task is currently running. + completed = "completed" #: The start Task has exited with exit code 0, or the start Task has failed and the retry limit has reached, or the start Task process did not run due to Task preparation errors (such as resource file download failures). + + +class ComputeNodeState(str, Enum): + + idle = "idle" #: The Compute Node is not currently running a Task. + rebooting = "rebooting" #: The Compute Node is rebooting. + reimaging = "reimaging" #: The Compute Node is reimaging. + running = "running" #: The Compute Node is running one or more Tasks (other than a start task). + unusable = "unusable" #: The Compute Node cannot be used for Task execution due to errors. + creating = "creating" #: The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the Pool. + starting = "starting" #: The Batch service is starting on the underlying virtual machine. + waiting_for_start_task = "waitingforstarttask" #: The start Task has started running on the Compute Node, but waitForSuccess is set and the start Task has not yet completed. + start_task_failed = "starttaskfailed" #: The start Task has failed on the Compute Node (and exhausted all retries), and waitForSuccess is set. The Compute Node is not usable for running Tasks. + unknown = "unknown" #: The Batch service has lost contact with the Compute Node, and does not know its true state. + leaving_pool = "leavingpool" #: The Compute Node is leaving the Pool, either because the user explicitly removed it or because the Pool is resizing or autoscaling down. + offline = "offline" #: The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute Node is disabled. 
+ preempted = "preempted" #: The low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + + +class SchedulingState(str, Enum): + + enabled = "enabled" #: Tasks can be scheduled on the Compute Node. + disabled = "disabled" #: No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node may still run to completion. All Compute Nodes start with scheduling enabled. + + +class DisableJobOption(str, Enum): + + requeue = "requeue" #: Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. + wait = "wait" #: Allow currently running Tasks to complete. + + +class ComputeNodeDeallocationOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Remove Compute Nodes as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute Nodes when all Tasks have completed. + retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have expired. + + +class ComputeNodeRebootOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. 
Restart the Compute Node as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed. + retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all Task retention periods have expired. + + +class ComputeNodeReimageOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the Compute Node when all Tasks have completed. + retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all Task retention periods have expired. + + +class DisableComputeNodeSchedulingOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. 
The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline state when all Tasks have completed. diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate.py new file mode 100644 index 00000000..b51ca681 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the Certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the Certificate. + :type url: str + :param state: The current state of the Certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the Certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Certificate. 
This + property is not set if the Certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the Certificate + entered its previous state. This property is not set if the Certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the Certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this Certificate. This property is set only if the + Certificate is in the DeleteFailed state. + :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, **kwargs): + super(Certificate, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + 
self.public_data = kwargs.get('public_data', None) + self.delete_certificate_error = kwargs.get('delete_certificate_error', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options.py new file mode 100644 index 00000000..f2c8d5bb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options_py3.py new file mode 100644 index 00000000..c7d61b36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter.py new file mode 100644 index 00000000..497ecad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The X.509 thumbprint of the Certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the Certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the Certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the Certificate's private key. + This is required if the Certificate format is pfx. It should be omitted if + the Certificate format is cer. 
+ :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.data = kwargs.get('data', None) + self.certificate_format = kwargs.get('certificate_format', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter_py3.py new file mode 100644 index 00000000..70c3f6c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_add_parameter_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. 
The X.509 thumbprint of the Certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the Certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the Certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the Certificate's private key. + This is required if the Certificate format is pfx. It should be omitted if + the Certificate format is cer. + :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None: + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.data = data + self.certificate_format = certificate_format + self.password = password diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options.py new file mode 100644 index 00000000..5c7c936c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options.py @@ -0,0 +1,46 @@ 
+# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options_py3.py new file mode 100644 index 00000000..8afbcf24 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_cancel_deletion_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options.py new file mode 100644 index 00000000..5ff7ee83 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options_py3.py new file mode 100644 index 00000000..47f91b10 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_delete_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options.py new file mode 100644 index 00000000..2b474c17 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options_py3.py new file mode 100644 index 00000000..4bd6bb70 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. 
+ + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options.py new file mode 100644 index 00000000..39c31a47 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options_py3.py new file mode 100644 index 00000000..d98edb44 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_paged.py 
b/azext/generated/sdk/batch/v2019_06_01/models/certificate_paged.py new file mode 100644 index 00000000..985d7838 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CertificatePaged(Paged): + """ + A paging container for iterating over a list of :class:`Certificate ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Certificate]'} + } + + def __init__(self, *args, **kwargs): + + super(CertificatePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_py3.py new file mode 100644 index 00000000..cd64a868 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the Certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the Certificate. + :type url: str + :param state: The current state of the Certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the Certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Certificate. This + property is not set if the Certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the Certificate + entered its previous state. This property is not set if the Certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the Certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this Certificate. This property is set only if the + Certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None: + super(Certificate, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.url = url + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.public_data = public_data + self.delete_certificate_error = delete_certificate_error diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference.py new file mode 100644 index 00000000..dd759a08 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a Certificate to be installed on Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the Certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the Certificate store on the + Compute Node into which to install the Certificate. The default value is + currentuser. This property is applicable only for Pools configured with + Windows Compute Nodes (that is, created with cloudServiceConfiguration, or + with virtualMachineConfiguration using a Windows Image reference). For + Linux Compute Nodes, the Certificates are stored in a directory inside the + Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the Certificate store on the Compute Node + into which to install the Certificate. This property is applicable only + for Pools configured with Windows Compute Nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows Image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user Accounts on the Compute Node should have + access to the private data of the Certificate. You can specify more than + one visibility in this collection. The default is all Accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, **kwargs): + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.store_location = kwargs.get('store_location', None) + self.store_name = kwargs.get('store_name', None) + self.visibility = kwargs.get('visibility', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference_py3.py new file mode 100644 index 00000000..d125e9e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/certificate_reference_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a Certificate to be installed on Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the Certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the Certificate store on the + Compute Node into which to install the Certificate. The default value is + currentuser. This property is applicable only for Pools configured with + Windows Compute Nodes (that is, created with cloudServiceConfiguration, or + with virtualMachineConfiguration using a Windows Image reference). For + Linux Compute Nodes, the Certificates are stored in a directory inside the + Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the Certificate store on the Compute Node + into which to install the Certificate. This property is applicable only + for Pools configured with Windows Compute Nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows Image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user Accounts on the Compute Node should have + access to the private data of the Certificate. You can specify more than + one visibility in this collection. The default is all Accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.store_location = store_location + self.store_name = store_name + self.visibility = visibility diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job.py new file mode 100644 index 00000000..a2b73818 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job.py @@ -0,0 +1,164 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch Job. + + :param id: A string that uniquely identifies the Job within the Account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the Job. + :type url: str + :param e_tag: The ETag of the Job. This is an opaque string. You can use + it to detect whether the Job has changed between requests. In particular, + you can be pass the ETag when updating a Job to specify that your changes + should take effect only if nobody else has modified the Job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job. This is the last + time at which the Job level data, such as the Job state or priority, + changed. It does not factor in task-level changes such as adding new Tasks + or Tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Job. + :type creation_time: datetime + :param state: The current state of the Job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the Job entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Job. This property is not + set if the Job is in its initial Active state. 
Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the Job entered + its previous state. This property is not set if the Job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. The Job Preparation + Task is a special Task run on each Compute Node before any other Task of + the Job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. The Job Release Task is a + special Task run at the end of the Job on each Compute Node that has run + any other Task of the Job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The Pool settings associated with the Job. 
+ :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the Job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + Job. This property is populated only if the CloudJob was retrieved with an + expand clause including the 'stats' attribute; otherwise it is null. The + statistics may not be immediately available. The Batch service performs + periodic roll-up of statistics. The typical delay is about 30 minutes. 
+ :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.url = kwargs.get('url', 
None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.metadata = kwargs.get('metadata', None) + self.execution_info = kwargs.get('execution_info', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_paged.py new file mode 100644 index 00000000..c642458f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudJobPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudJob ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudJob]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudJobPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_py3.py new file mode 100644 index 00000000..fc07528a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_py3.py @@ -0,0 +1,164 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch Job. + + :param id: A string that uniquely identifies the Job within the Account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the Job. + :type url: str + :param e_tag: The ETag of the Job. This is an opaque string. You can use + it to detect whether the Job has changed between requests. 
In particular, + you can be pass the ETag when updating a Job to specify that your changes + should take effect only if nobody else has modified the Job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job. This is the last + time at which the Job level data, such as the Job state or priority, + changed. It does not factor in task-level changes such as adding new Tasks + or Tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Job. + :type creation_time: datetime + :param state: The current state of the Job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the Job entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Job. This property is not + set if the Job is in its initial Active state. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the Job entered + its previous state. This property is not set if the Job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. 
The Job Preparation + Task is a special Task run on each Compute Node before any other Task of + the Job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. The Job Release Task is a + special Task run at the end of the Job on each Compute Node that has run + any other Task of the Job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The Pool settings associated with the Job. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the Job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + Job. This property is populated only if the CloudJob was retrieved with an + expand clause including the 'stats' attribute; otherwise it is null. The + statistics may not be immediately available. The Batch service performs + periodic roll-up of statistics. The typical delay is about 30 minutes. + :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 
'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: + super(CloudJob, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.network_configuration = 
network_configuration + self.metadata = metadata + self.execution_info = execution_info + self.stats = stats diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule.py new file mode 100644 index 00000000..14eeed02 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJobSchedule(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + :param id: A string that uniquely identifies the schedule within the + Account. + :type id: str + :param display_name: The display name for the schedule. + :type display_name: str + :param url: The URL of the Job Schedule. + :type url: str + :param e_tag: The ETag of the Job Schedule. This is an opaque string. You + can use it to detect whether the Job Schedule has changed between + requests. In particular, you can be pass the ETag with an Update Job + Schedule request to specify that your changes should take effect only if + nobody else has modified the schedule in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job Schedule. This is + the last time at which the schedule level data, such as the Job + specification or recurrence information, changed. It does not factor in + job-level changes such as new Jobs being created or Jobs changing state. 
+ :type last_modified: datetime + :param creation_time: The creation time of the Job Schedule. + :type creation_time: datetime + :param state: The current state of the Job Schedule. Possible values + include: 'active', 'completed', 'disabled', 'terminating', 'deleting' + :type state: str or ~azure.batch.models.JobScheduleState + :param state_transition_time: The time at which the Job Schedule entered + the current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Job Schedule. This + property is not present if the Job Schedule is in its initial active + state. Possible values include: 'active', 'completed', 'disabled', + 'terminating', 'deleting' + :type previous_state: str or ~azure.batch.models.JobScheduleState + :param previous_state_transition_time: The time at which the Job Schedule + entered its previous state. This property is not present if the Job + Schedule is in its initial active state. + :type previous_state_transition_time: datetime + :param schedule: The schedule according to which Jobs will be created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param execution_info: Information about Jobs that have been and will be + run under this schedule. + :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: The lifetime resource usage statistics for the Job Schedule. + The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. 
+ :type stats: ~azure.batch.models.JobScheduleStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobScheduleState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudJobSchedule, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.execution_info = kwargs.get('execution_info', None) + self.metadata = kwargs.get('metadata', None) + self.stats = kwargs.get('stats', None) diff --git 
a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_paged.py new file mode 100644 index 00000000..3abb6f15 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudJobSchedulePaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudJobSchedule ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudJobSchedule]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudJobSchedulePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_py3.py new file mode 100644 index 00000000..da9b187f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_job_schedule_py3.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJobSchedule(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + :param id: A string that uniquely identifies the schedule within the + Account. + :type id: str + :param display_name: The display name for the schedule. + :type display_name: str + :param url: The URL of the Job Schedule. + :type url: str + :param e_tag: The ETag of the Job Schedule. This is an opaque string. You + can use it to detect whether the Job Schedule has changed between + requests. In particular, you can be pass the ETag with an Update Job + Schedule request to specify that your changes should take effect only if + nobody else has modified the schedule in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job Schedule. This is + the last time at which the schedule level data, such as the Job + specification or recurrence information, changed. It does not factor in + job-level changes such as new Jobs being created or Jobs changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Job Schedule. + :type creation_time: datetime + :param state: The current state of the Job Schedule. Possible values + include: 'active', 'completed', 'disabled', 'terminating', 'deleting' + :type state: str or ~azure.batch.models.JobScheduleState + :param state_transition_time: The time at which the Job Schedule entered + the current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Job Schedule. This + property is not present if the Job Schedule is in its initial active + state. 
Possible values include: 'active', 'completed', 'disabled', + 'terminating', 'deleting' + :type previous_state: str or ~azure.batch.models.JobScheduleState + :param previous_state_transition_time: The time at which the Job Schedule + entered its previous state. This property is not present if the Job + Schedule is in its initial active state. + :type previous_state_transition_time: datetime + :param schedule: The schedule according to which Jobs will be created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param execution_info: Information about Jobs that have been and will be + run under this schedule. + :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: The lifetime resource usage statistics for the Job Schedule. + The statistics may not be immediately available. The Batch service + performs periodic roll-up of statistics. The typical delay is about 30 + minutes. 
+ :type stats: ~azure.batch.models.JobScheduleStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobScheduleState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, schedule=None, job_specification=None, execution_info=None, metadata=None, stats=None, **kwargs) -> None: + super(CloudJobSchedule, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.schedule = schedule + self.job_specification = job_specification + self.execution_info = execution_info + self.metadata = metadata + self.stats = stats diff --git 
a/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool.py new file mode 100644 index 00000000..fa0e7b7e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudPool(Model): + """A Pool in the Azure Batch service. + + :param id: A string that uniquely identifies the Pool within the Account. + The ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. The + ID is case-preserving and case-insensitive (that is, you may not have two + IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the Pool. + :type url: str + :param e_tag: The ETag of the Pool. This is an opaque string. You can use + it to detect whether the Pool has changed between requests. In particular, + you can be pass the ETag when updating a Pool to specify that your changes + should take effect only if nobody else has modified the Pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Pool. This is the last + time at which the Pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. 
It does not factor in node-level + changes such as a Compute Node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Pool. + :type creation_time: datetime + :param state: The current state of the Pool. Possible values include: + 'active', 'deleting' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the Pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the Pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the Pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the Pool. All virtual + machines in a Pool are the same size. For information about available + sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes + in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. 
This is the timeout for the most recent resize operation. (The + initial sizing when the Pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the Pool. This property is set only if one or more errors + occurred during the last Pool resize, and only when the Pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated Compute Nodes + currently in the Pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority Compute + Nodes currently in the Pool. Low-priority Compute Nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property is set only if the Pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. + This property is set only if the Pool automatically scales, i.e. + enableAutoScale is true. 
+ :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the Pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. This imposes restrictions on which + Compute Nodes can be assigned to the Pool. Specifying this value can + reduce the chance of the requested number of Compute Nodes to be allocated + in the Pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. 
There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the Pool. This property is populated only if the CloudPool was + retrieved with an expand clause including the 'stats' attribute; otherwise + it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. 
+ :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 
'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudPool, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.allocation_state = kwargs.get('allocation_state', None) + self.allocation_state_transition_time = kwargs.get('allocation_state_transition_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.resize_errors = kwargs.get('resize_errors', None) + self.current_dedicated_nodes = kwargs.get('current_dedicated_nodes', None) + self.current_low_priority_nodes = kwargs.get('current_low_priority_nodes', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = 
kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.auto_scale_run = kwargs.get('auto_scale_run', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_paged.py new file mode 100644 index 00000000..c23eb7cd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudPoolPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudPool ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudPool]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudPoolPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_py3.py new file mode 100644 index 00000000..d71b1cfb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_pool_py3.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudPool(Model): + """A Pool in the Azure Batch service. + + :param id: A string that uniquely identifies the Pool within the Account. + The ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. The + ID is case-preserving and case-insensitive (that is, you may not have two + IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the Pool. 
+ :type url: str + :param e_tag: The ETag of the Pool. This is an opaque string. You can use + it to detect whether the Pool has changed between requests. In particular, + you can be pass the ETag when updating a Pool to specify that your changes + should take effect only if nobody else has modified the Pool in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Pool. This is the last + time at which the Pool level data, such as the targetDedicatedNodes or + enableAutoscale settings, changed. It does not factor in node-level + changes such as a Compute Node changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Pool. + :type creation_time: datetime + :param state: The current state of the Pool. Possible values include: + 'active', 'deleting' + :type state: str or ~azure.batch.models.PoolState + :param state_transition_time: The time at which the Pool entered its + current state. + :type state_transition_time: datetime + :param allocation_state: Whether the Pool is resizing. Possible values + include: 'steady', 'resizing', 'stopping' + :type allocation_state: str or ~azure.batch.models.AllocationState + :param allocation_state_transition_time: The time at which the Pool + entered its current allocation state. + :type allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the Pool. All virtual + machines in a Pool are the same size. For information about available + sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes + in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. 
This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This is the timeout for the most recent resize operation. (The + initial sizing when the Pool is created counts as a resize.) The default + value is 15 minutes. + :type resize_timeout: timedelta + :param resize_errors: A list of errors encountered while performing the + last resize on the Pool. This property is set only if one or more errors + occurred during the last Pool resize, and only when the Pool + allocationState is Steady. + :type resize_errors: list[~azure.batch.models.ResizeError] + :param current_dedicated_nodes: The number of dedicated Compute Nodes + currently in the Pool. + :type current_dedicated_nodes: int + :param current_low_priority_nodes: The number of low-priority Compute + Nodes currently in the Pool. Low-priority Compute Nodes which have been + preempted are included in this count. + :type current_low_priority_nodes: int + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property is set only if the Pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. + This property is set only if the Pool automatically scales, i.e. + enableAutoScale is true. + :type auto_scale_evaluation_interval: timedelta + :param auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the Pool automatically + scales, i.e. enableAutoScale is true. + :type auto_scale_run: ~azure.batch.models.AutoScaleRun + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. This imposes restrictions on which + Compute Nodes can be assigned to the Pool. Specifying this value can + reduce the chance of the requested number of Compute Nodes to be allocated + in the Pool. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. 
+ For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. 
+ :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. + :type metadata: list[~azure.batch.models.MetadataItem] + :param stats: Utilization and resource usage statistics for the entire + lifetime of the Pool. This property is populated only if the CloudPool was + retrieved with an expand clause including the 'stats' attribute; otherwise + it is null. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + :type stats: ~azure.batch.models.PoolStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'PoolState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'}, + 'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 
'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'stats': {'key': 'stats', 'type': 'PoolStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, allocation_state=None, allocation_state_transition_time=None, vm_size: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, resize_errors=None, current_dedicated_nodes: int=None, current_low_priority_nodes: int=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, auto_scale_run=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, 
application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, stats=None, **kwargs) -> None: + super(CloudPool, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.allocation_state = allocation_state + self.allocation_state_transition_time = allocation_state_transition_time + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.resize_errors = resize_errors + self.current_dedicated_nodes = current_dedicated_nodes + self.current_low_priority_nodes = current_low_priority_nodes + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.auto_scale_run = auto_scale_run + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata + self.stats = stats diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration.py new file mode 100644 index 00000000..b22fe7a3 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudServiceConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Cloud + Services platform. + + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. The Azure Guest OS family to be installed on + the virtual machines in the Pool. Possible values are: + 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. + 3 - OS Family 3, equivalent to Windows Server 2012. + 4 - OS Family 4, equivalent to Windows Server 2012 R2. + 5 - OS Family 5, equivalent to Windows Server 2016. + 6 - OS Family 6, equivalent to Windows Server 2019. For more information, + see Azure Guest OS Releases + (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + :type os_family: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the Pool. The default value is * which specifies the + latest operating system version for the specified OS family. 
+ :type os_version: str + """ + + _validation = { + 'os_family': {'required': True}, + } + + _attribute_map = { + 'os_family': {'key': 'osFamily', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CloudServiceConfiguration, self).__init__(**kwargs) + self.os_family = kwargs.get('os_family', None) + self.os_version = kwargs.get('os_version', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration_py3.py new file mode 100644 index 00000000..0e41d6c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_service_configuration_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudServiceConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Cloud + Services platform. + + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. The Azure Guest OS family to be installed on + the virtual machines in the Pool. Possible values are: + 2 - OS Family 2, equivalent to Windows Server 2008 R2 SP1. + 3 - OS Family 3, equivalent to Windows Server 2012. + 4 - OS Family 4, equivalent to Windows Server 2012 R2. + 5 - OS Family 5, equivalent to Windows Server 2016. + 6 - OS Family 6, equivalent to Windows Server 2019. 
For more information, + see Azure Guest OS Releases + (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). + :type os_family: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the Pool. The default value is * which specifies the + latest operating system version for the specified OS family. + :type os_version: str + """ + + _validation = { + 'os_family': {'required': True}, + } + + _attribute_map = { + 'os_family': {'key': 'osFamily', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, + } + + def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None: + super(CloudServiceConfiguration, self).__init__(**kwargs) + self.os_family = os_family + self.os_version = os_version diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_task.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task.py new file mode 100644 index 00000000..e460c092 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task.py @@ -0,0 +1,211 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch Task. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. 
Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the Task within the Job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the Task. + :type url: str + :param e_tag: The ETag of the Task. This is an opaque string. You can use + it to detect whether the Task has changed between requests. In particular, + you can be pass the ETag when updating a Task to specify that your changes + should take effect only if nobody else has modified the Task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param creation_time: The creation time of the Task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the Task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the Task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Task. 
This property is + not set if the Task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the Task entered + its previous state. This property is not set if the Task is in its initial + Active state. + :type previous_state_transition_time: datetime + :param command_line: The command line of the Task. For multi-instance + Tasks, the command line is executed as the primary Task, after the primary + Task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. 
Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the Task. 
+ :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the Compute Node on which the Task + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the Task. + :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. 
The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, **kwargs): + super(CloudTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.url = kwargs.get('url', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.exit_conditions = kwargs.get('exit_conditions', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.affinity_info = kwargs.get('affinity_info', None) + self.constraints = kwargs.get('constraints', None) + self.user_identity = kwargs.get('user_identity', None) + self.execution_info = kwargs.get('execution_info', None) + self.node_info = kwargs.get('node_info', None) + self.multi_instance_settings = kwargs.get('multi_instance_settings', None) + self.stats = kwargs.get('stats', None) + self.depends_on = kwargs.get('depends_on', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result.py 
b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result.py new file mode 100644 index 00000000..c892bfe0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a Task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, **kwargs): + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result_py3.py new file mode 100644 index 00000000..3ab59743 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_list_subtasks_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a Task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_paged.py new file mode 100644 index 00000000..3d8ef774 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudTaskPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudTask ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudTask]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudTaskPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_py3.py new file mode 100644 index 00000000..096f3717 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/cloud_task_py3.py @@ -0,0 +1,211 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch Task. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. 
+ The best practice for long running Tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the Task within the Job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the Task. + :type url: str + :param e_tag: The ETag of the Task. This is an opaque string. You can use + it to detect whether the Task has changed between requests. In particular, + you can be pass the ETag when updating a Task to specify that your changes + should take effect only if nobody else has modified the Task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param creation_time: The creation time of the Task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the Task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the Task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Task. This property is + not set if the Task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the Task entered + its previous state. This property is not set if the Task is in its initial + Active state. 
+ :type previous_state_transition_time: datetime + :param command_line: The command line of the Task. For multi-instance + Tasks, the command line is executed as the primary Task, after the primary + Task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. 
There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the Task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the Compute Node on which the Task + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the Task. 
+ :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def 
__init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(CloudTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.exit_conditions = exit_conditions + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.execution_info = execution_info + self.node_info = node_info + self.multi_instance_settings = multi_instance_settings + self.stats = stats + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node.py new file mode 100644 index 00000000..f807bad4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A Compute Node in the Batch service. + + :param id: The ID of the Compute Node. Every Compute Node that is added to + a Pool is assigned a unique ID. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the ID is reclaimed and + could be reused for new Compute Nodes. + :type id: str + :param url: The URL of the Compute Node. + :type url: str + :param state: The current state of the Compute Node. The low-priority + Compute Node has been preempted. Tasks which were running on the Compute + Node when it was preempted will be rescheduled when another Compute Node + becomes available. Possible values include: 'idle', 'rebooting', + 'reimaging', 'running', 'unusable', 'creating', 'starting', + 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', + 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the Compute Node is available for Task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the Compute Node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the Compute Node was + started. This property may not be present if the Compute Node state is + unusable. 
+ :type last_boot_time: datetime + :param allocation_time: The time at which this Compute Node was allocated + to the Pool. This is the time when the Compute Node was initially + allocated and doesn't change once set. It is not updated when the Compute + Node is service healed or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other Nodes can use to communicate + with this Compute Node. Every Compute Node that is added to a Pool is + assigned a unique IP address. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the IP address is reclaimed + and could be reused for new Compute Nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a Task + to request that the Task be scheduled on this Compute Node. Note that this + is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled + elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the Compute Node. + For information about available sizes of virtual machines in Pools, see + Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of Job Tasks completed on the + Compute Node. This includes Job Manager Tasks and normal Tasks, but not + Job Preparation, Job Release or Start Tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running Job + Tasks on the Compute Node. This includes Job Manager Tasks and normal + Tasks, but not Job Preparation, Job Release or Start Tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of Job Tasks which + completed successfully (with exitCode 0) on the Compute Node. 
This + includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job + Release or Start Tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of Tasks whose state has recently changed. + This property is present only if at least one Task has run on this Compute + Node since it was assigned to the Pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The Task specified to run on the Compute Node as it + joins the Pool. + :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start Task on the Compute Node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of Certificates installed on the + Compute Node. For Windows Nodes, the Batch service installs the + Certificates to the specified Certificate store and location. For Linux + Compute Nodes, the Certificates are stored in a directory inside the Task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the Task to query for this location. For Certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and Certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the Compute Node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this Compute Node is a dedicated Compute + Node. If false, the Compute Node is a low-priority Compute Node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the Compute + Node. 
+ :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the Compute Node agent version + and the time the Compute Node upgraded to a new version. + :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, **kwargs): + super(ComputeNode, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.scheduling_state = kwargs.get('scheduling_state', None) + self.state_transition_time 
= kwargs.get('state_transition_time', None) + self.last_boot_time = kwargs.get('last_boot_time', None) + self.allocation_time = kwargs.get('allocation_time', None) + self.ip_address = kwargs.get('ip_address', None) + self.affinity_id = kwargs.get('affinity_id', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_tasks_run = kwargs.get('total_tasks_run', None) + self.running_tasks_count = kwargs.get('running_tasks_count', None) + self.total_tasks_succeeded = kwargs.get('total_tasks_succeeded', None) + self.recent_tasks = kwargs.get('recent_tasks', None) + self.start_task = kwargs.get('start_task', None) + self.start_task_info = kwargs.get('start_task_info', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.errors = kwargs.get('errors', None) + self.is_dedicated = kwargs.get('is_dedicated', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) + self.node_agent_info = kwargs.get('node_agent_info', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options.py new file mode 100644 index 00000000..89020475 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options_py3.py new file mode 100644 index 00000000..dab4040b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_add_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options.py new file mode 100644 index 00000000..4874a98a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options_py3.py new file mode 100644 index 00000000..88217b93 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_delete_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options.py new file mode 100644 index 00000000..92bf2911 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options_py3.py new file mode 100644 index 00000000..0432c5db --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_disable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options.py new file mode 100644 index 00000000..905e3e34 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options_py3.py new file mode 100644 index 00000000..4ef5d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_enable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration.py new file mode 100644 index 00000000..ca48b8f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the Compute Node. 
+ :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = kwargs.get('inbound_endpoints', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration_py3.py new file mode 100644 index 00000000..4a29c553 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_endpoint_configuration_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the Compute Node. 
+ :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, *, inbound_endpoints, **kwargs) -> None: + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = inbound_endpoints diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error.py new file mode 100644 index 00000000..eb284933 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a Compute Node. + + :param code: An identifier for the Compute Node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Compute Node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + Compute Node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.error_details = kwargs.get('error_details', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error_py3.py new file mode 100644 index 00000000..ef0a84da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a Compute Node. + + :param code: An identifier for the Compute Node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Compute Node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + Compute Node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None: + super(ComputeNodeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.error_details = error_details diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options.py new file mode 100644 index 00000000..6218d444 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options_py3.py new file mode 100644 index 00000000..de6284b3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options.py new file mode 100644 index 00000000..20af5558 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options_py3.py new file mode 100644 index 00000000..d79ce622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_desktop_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options.py new file mode 100644 index 00000000..9c01ed5f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options_py3.py new file mode 100644 index 00000000..2d7987ab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result.py new file mode 100644 index 00000000..56060d8b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the Compute Node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + Compute Node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = kwargs.get('remote_login_ip_address', None) + self.remote_login_port = kwargs.get('remote_login_port', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result_py3.py new file mode 100644 index 00000000..a2120536 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_get_remote_login_settings_result_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the Compute Node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + Compute Node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = remote_login_ip_address + self.remote_login_port = remote_login_port diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information.py new file mode 100644 index 00000000..0f9677ac --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the Compute Node on which a Task ran. + + :param affinity_id: An identifier for the Node on which the Task ran, + which can be passed when adding a Task to request that the Task be + scheduled on this Compute Node. + :type affinity_id: str + :param node_url: The URL of the Compute Node on which the Task ran. . + :type node_url: str + :param pool_id: The ID of the Pool on which the Task ran. + :type pool_id: str + :param node_id: The ID of the Compute Node on which the Task ran. 
+ :type node_id: str + :param task_root_directory: The root directory of the Task on the Compute + Node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Task + on the Compute Node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) + self.node_url = kwargs.get('node_url', None) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information_py3.py new file mode 100644 index 00000000..a40ca517 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_information_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the Compute Node on which a Task ran. + + :param affinity_id: An identifier for the Node on which the Task ran, + which can be passed when adding a Task to request that the Task be + scheduled on this Compute Node. + :type affinity_id: str + :param node_url: The URL of the Compute Node on which the Task ran. . + :type node_url: str + :param pool_id: The ID of the Pool on which the Task ran. + :type pool_id: str + :param node_id: The ID of the Compute Node on which the Task ran. + :type node_id: str + :param task_root_directory: The root directory of the Task on the Compute + Node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Task + on the Compute Node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str=None, node_url: str=None, pool_id: str=None, node_id: str=None, task_root_directory: str=None, task_root_directory_url: str=None, **kwargs) -> None: + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id + self.node_url = node_url + self.pool_id = pool_id + self.node_id = node_id + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options.py new file mode 100644 index 00000000..a8e5602b --- 
/dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options_py3.py new file mode 100644 index 00000000..323bf2f5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_paged.py 
b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_paged.py new file mode 100644 index 00000000..26f41dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ComputeNodePaged(Paged): + """ + A paging container for iterating over a list of :class:`ComputeNode ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ComputeNode]'} + } + + def __init__(self, *args, **kwargs): + + super(ComputeNodePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_py3.py new file mode 100644 index 00000000..fd3ef411 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_py3.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A Compute Node in the Batch service. + + :param id: The ID of the Compute Node. Every Compute Node that is added to + a Pool is assigned a unique ID. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the ID is reclaimed and + could be reused for new Compute Nodes. + :type id: str + :param url: The URL of the Compute Node. + :type url: str + :param state: The current state of the Compute Node. The low-priority + Compute Node has been preempted. Tasks which were running on the Compute + Node when it was preempted will be rescheduled when another Compute Node + becomes available. Possible values include: 'idle', 'rebooting', + 'reimaging', 'running', 'unusable', 'creating', 'starting', + 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', + 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the Compute Node is available for Task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the Compute Node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the Compute Node was + started. This property may not be present if the Compute Node state is + unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this Compute Node was allocated + to the Pool. This is the time when the Compute Node was initially + allocated and doesn't change once set. It is not updated when the Compute + Node is service healed or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other Nodes can use to communicate + with this Compute Node. 
Every Compute Node that is added to a Pool is + assigned a unique IP address. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the IP address is reclaimed + and could be reused for new Compute Nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a Task + to request that the Task be scheduled on this Compute Node. Note that this + is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled + elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the Compute Node. + For information about available sizes of virtual machines in Pools, see + Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of Job Tasks completed on the + Compute Node. This includes Job Manager Tasks and normal Tasks, but not + Job Preparation, Job Release or Start Tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running Job + Tasks on the Compute Node. This includes Job Manager Tasks and normal + Tasks, but not Job Preparation, Job Release or Start Tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of Job Tasks which + completed successfully (with exitCode 0) on the Compute Node. This + includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job + Release or Start Tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of Tasks whose state has recently changed. + This property is present only if at least one Task has run on this Compute + Node since it was assigned to the Pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The Task specified to run on the Compute Node as it + joins the Pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + start Task on the Compute Node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of Certificates installed on the + Compute Node. For Windows Nodes, the Batch service installs the + Certificates to the specified Certificate store and location. For Linux + Compute Nodes, the Certificates are stored in a directory inside the Task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the Task to query for this location. For Certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and Certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the Compute Node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this Compute Node is a dedicated Compute + Node. If false, the Compute Node is a low-priority Compute Node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the Compute + Node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the Compute Node agent version + and the time the Compute Node upgraded to a new version. 
+ :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, **kwargs) -> 
None: + super(ComputeNode, self).__init__(**kwargs) + self.id = id + self.url = url + self.state = state + self.scheduling_state = scheduling_state + self.state_transition_time = state_transition_time + self.last_boot_time = last_boot_time + self.allocation_time = allocation_time + self.ip_address = ip_address + self.affinity_id = affinity_id + self.vm_size = vm_size + self.total_tasks_run = total_tasks_run + self.running_tasks_count = running_tasks_count + self.total_tasks_succeeded = total_tasks_succeeded + self.recent_tasks = recent_tasks + self.start_task = start_task + self.start_task_info = start_task_info + self.certificate_references = certificate_references + self.errors = errors + self.is_dedicated = is_dedicated + self.endpoint_configuration = endpoint_configuration + self.node_agent_info = node_agent_info diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options.py new file mode 100644 index 00000000..182c563e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options_py3.py new file mode 100644 index 00000000..97e8cb41 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reboot_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options.py new file mode 100644 index 00000000..8ec6e55f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright 
(c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options_py3.py new file mode 100644 index 00000000..dcff3ee8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_reimage_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options.py new file mode 100644 index 00000000..ed1f9548 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options_py3.py new file mode 100644 index 00000000..81e45b6c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_update_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options.py new file mode 100644 index 00000000..071b712e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options_py3.py new file mode 100644 index 00000000..bac1dad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_upload_batch_service_logs_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user.py new file mode 100644 index 00000000..af365f75 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user Account for RDP or SSH access on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the Account. + :type name: str + :param is_admin: Whether the Account should be an administrator on the + Compute Node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_admin = kwargs.get('is_admin', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.password = kwargs.get('password', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user_py3.py new file mode 100644 index 00000000..36f1ef5b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/compute_node_user_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user Account for RDP or SSH access on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the Account. + :type name: str + :param is_admin: Whether the Account should be an administrator on the + Compute Node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the Account should expire. 
If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None: + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = name + self.is_admin = is_admin + self.expiry_time = expiry_time + self.password = password + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2019_06_01/models/container_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/container_configuration.py new file mode 100644 index 00000000..ae90c83b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/container_configuration.py @@ -0,0 +1,53 @@ +# 
coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled Pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container Image names. + This is the full Image reference, as would be specified to "docker pull". + An Image will be sourced from the default Docker registry unless the Image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any Images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. 
+ :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, **kwargs): + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = kwargs.get('container_image_names', None) + self.container_registries = kwargs.get('container_registries', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/container_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/container_configuration_py3.py new file mode 100644 index 00000000..36885632 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/container_configuration_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled Pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container Image names. 
+ This is the full Image reference, as would be specified to "docker pull". + An Image will be sourced from the default Docker registry unless the Image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any Images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. + :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None: + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = container_image_names + self.container_registries = container_registries diff --git a/azext/generated/sdk/batch/v2019_06_01/models/container_registry.py b/azext/generated/sdk/batch/v2019_06_01/models/container_registry.py new file mode 100644 index 00000000..18203196 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/container_registry.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = kwargs.get('registry_server', None) + self.user_name = kwargs.get('user_name', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/container_registry_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/container_registry_py3.py new file mode 100644 index 00000000..eb47f9e5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/container_registry_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None: + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = registry_server + self.user_name = user_name + self.password = password diff --git a/azext/generated/sdk/batch/v2019_06_01/models/data_disk.py b/azext/generated/sdk/batch/v2019_06_01/models/data_disk.py new file mode 100644 index 00000000..62430e5d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/data_disk.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to Compute Nodes + in the Pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. + :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage Account type to be used for the + data disk. If omitted, the default is "standard_lrs". 
Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, **kwargs): + super(DataDisk, self).__init__(**kwargs) + self.lun = kwargs.get('lun', None) + self.caching = kwargs.get('caching', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.storage_account_type = kwargs.get('storage_account_type', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/data_disk_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/data_disk_py3.py new file mode 100644 index 00000000..1965e454 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/data_disk_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to Compute Nodes + in the Pool. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. 
+ :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage Account type to be used for the + data disk. If omitted, the default is "standard_lrs". Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: + super(DataDisk, self).__init__(**kwargs) + self.lun = lun + self.caching = caching + self.disk_size_gb = disk_size_gb + self.storage_account_type = storage_account_type diff --git a/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error.py b/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error.py new file mode 100644 index 00000000..01a2ab5e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a Certificate. + + :param code: An identifier for the Certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + Certificate deletion error. This list includes details such as the active + Pools and Compute Nodes referencing this Certificate. However, if a large + number of resources reference the Certificate, the list contains only + about the first hundred. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error_py3.py new file mode 100644 index 00000000..e7be00fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/delete_certificate_error_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a Certificate. + + :param code: An identifier for the Certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + Certificate deletion error. This list includes details such as the active + Pools and Compute Nodes referencing this Certificate. However, if a large + number of resources reference the Certificate, the list contains only + about the first hundred. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_06_01/models/environment_setting.py b/azext/generated/sdk/batch/v2019_06_01/models/environment_setting.py new file mode 100644 index 00000000..f2039d98 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/environment_setting.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a Task process. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/environment_setting_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/environment_setting_py3.py new file mode 100644 index 00000000..7a938844 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/environment_setting_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a Task process. 
+ + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str=None, **kwargs) -> None: + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/error_message.py b/azext/generated/sdk/batch/v2019_06_01/models/error_message.py new file mode 100644 index 00000000..bbdf64f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/error_message.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ErrorMessage, self).__init__(**kwargs) + self.lang = kwargs.get('lang', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/error_message_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/error_message_py3.py new file mode 100644 index 00000000..a84934fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/error_message_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None: + super(ErrorMessage, self).__init__(**kwargs) + self.lang = lang + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping.py new file mode 100644 index 00000000..7b18108a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a Task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping_py3.py new file mode 100644 index 00000000..01a0659a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_mapping_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a Task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, code: int, exit_options, **kwargs) -> None: + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = code + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping.py new file mode 100644 index 00000000..6b988bad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping_py3.py new file mode 100644 index 00000000..51c7b3be --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_code_range_mapping_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None: + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = start + self.end = end + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions.py new file mode 100644 index 00000000..b23924ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the Task completes. + + :param exit_codes: A list of individual Task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of Task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + Task fails to start due to an error. 
+ :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the Task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the Task fails + with an exit condition not covered by any of the other properties. This + value is used if the Task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = kwargs.get('exit_codes', None) + self.exit_code_ranges = kwargs.get('exit_code_ranges', None) + self.pre_processing_error = kwargs.get('pre_processing_error', None) + self.file_upload_error = kwargs.get('file_upload_error', None) + self.default = kwargs.get('default', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions_py3.py new file mode 100644 index 00000000..89fb190d --- 
/dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_conditions_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the Task completes. + + :param exit_codes: A list of individual Task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of Task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + Task fails to start due to an error. + :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the Task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the Task fails + with an exit condition not covered by any of the other properties. 
This + value is used if the Task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None: + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = exit_codes + self.exit_code_ranges = exit_code_ranges + self.pre_processing_error = pre_processing_error + self.file_upload_error = file_upload_error + self.default = default diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_options.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_options.py new file mode 100644 index 00000000..9d42c2b9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_options.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the Job containing the Task, if + the Task completes with the given exit condition and the Job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + Job's onTaskFailed property is noaction, then specifying this property + returns an error and the add Task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + Tasks that depend on this Task. The default is 'satisfy' for exit code 0, + and 'block' for all other exit conditions. If the Job's + usesTaskDependencies property is set to false, then specifying the + dependencyAction property returns an error and the add Task request fails + with an invalid property value error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). 
Possible values + include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, **kwargs): + super(ExitOptions, self).__init__(**kwargs) + self.job_action = kwargs.get('job_action', None) + self.dependency_action = kwargs.get('dependency_action', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/exit_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/exit_options_py3.py new file mode 100644 index 00000000..5a314662 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/exit_options_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the Job containing the Task, if + the Task completes with the given exit condition and the Job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + Job's onTaskFailed property is noaction, then specifying this property + returns an error and the add Task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). 
Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + Tasks that depend on this Task. The default is 'satisfy' for exit code 0, + and 'block' for all other exit conditions. If the Job's + usesTaskDependencies property is set to false, then specifying the + dependencyAction property returns an error and the add Task request fails + with an invalid property value error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). Possible values + include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None: + super(ExitOptions, self).__init__(**kwargs) + self.job_action = job_action + self.dependency_action = dependency_action diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options.py new file mode 100644 index 00000000..7522e806 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options_py3.py new file mode 100644 index 00000000..62291d14 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_compute_node_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options.py new file mode 100644 index 00000000..054babe8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options_py3.py new file mode 100644 index 00000000..7d783006 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_delete_from_task_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options.py new file mode 100644 index 00000000..9a6e3fb7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options_py3.py new file mode 100644 index 00000000..ab3dc34f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_compute_node_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options.py new file mode 100644 index 00000000..19bd5cde --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options_py3.py new file mode 100644 index 00000000..30ec6583 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_from_task_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options.py new file mode 100644 index 00000000..bf283d1d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options_py3.py new file mode 100644 index 00000000..69a90184 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_compute_node_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options.py new file mode 100644 index 00000000..836387d3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options_py3.py new file mode 100644 index 00000000..73996895 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_get_properties_from_task_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options.py new file mode 100644 index 00000000..dc32df46 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options_py3.py new file mode 100644 index 00000000..e475dcde --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_compute_node_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options.py b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options.py new file mode 100644 index 00000000..86728b25 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options_py3.py new file mode 100644 index 00000000..354c4869 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_list_from_task_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_properties.py b/azext/generated/sdk/batch/v2019_06_01/models/file_properties.py new file mode 100644 index 00000000..047a5e72 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_properties.py @@ -0,0 
+1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux Compute Nodes. + :type creation_time: datetime + :param last_modified: Required. The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux Compute Nodes. 
+ :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileProperties, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/file_properties_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/file_properties_py3.py new file mode 100644 index 00000000..ccfe33fd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/file_properties_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux Compute Nodes. + :type creation_time: datetime + :param last_modified: Required. 
The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux Compute Nodes. + :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None: + super(FileProperties, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.content_length = content_length + self.content_type = content_type + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2019_06_01/models/image_information.py b/azext/generated/sdk/batch/v2019_06_01/models/image_information.py new file mode 100644 index 00000000..cc3d7fd7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/image_information.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageInformation(Model): + """A reference to the Azure Virtual Machines Marketplace Image and additional + information about the Image. + + All required parameters must be populated in order to send to Azure. + + :param node_agent_sku_id: Required. The ID of the Compute Node agent SKU + which the Image supports. + :type node_agent_sku_id: str + :param image_reference: Required. The reference to the Azure Virtual + Machine's Marketplace Image. + :type image_reference: ~azure.batch.models.ImageReference + :param os_type: Required. The type of operating system (e.g. Windows or + Linux) of the Image. Possible values include: 'linux', 'windows' + :type os_type: str or ~azure.batch.models.OSType + :param capabilities: The capabilities or features which the Image + supports. Not every capability of the Image is listed. Capabilities in + this list are considered of special interest and are generally related to + integration with other features in the Azure Batch service. + :type capabilities: list[str] + :param batch_support_end_of_life: The time when the Azure Batch service + will stop accepting create Pool requests for the Image. + :type batch_support_end_of_life: datetime + :param verification_type: Required. Whether the Azure Batch service + actively verifies that the Image is compatible with the associated Compute + Node agent SKU. 
Possible values include: 'verified', 'unverified' + :type verification_type: str or ~azure.batch.models.VerificationType + """ + + _validation = { + 'node_agent_sku_id': {'required': True}, + 'image_reference': {'required': True}, + 'os_type': {'required': True}, + 'verification_type': {'required': True}, + } + + _attribute_map = { + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + 'capabilities': {'key': 'capabilities', 'type': '[str]'}, + 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, + 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, + } + + def __init__(self, **kwargs): + super(ImageInformation, self).__init__(**kwargs) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.image_reference = kwargs.get('image_reference', None) + self.os_type = kwargs.get('os_type', None) + self.capabilities = kwargs.get('capabilities', None) + self.batch_support_end_of_life = kwargs.get('batch_support_end_of_life', None) + self.verification_type = kwargs.get('verification_type', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/image_information_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/image_information_paged.py new file mode 100644 index 00000000..27a3647b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/image_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ImageInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`ImageInformation ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ImageInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(ImageInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/image_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/image_information_py3.py new file mode 100644 index 00000000..7515a9d2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/image_information_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageInformation(Model): + """A reference to the Azure Virtual Machines Marketplace Image and additional + information about the Image. + + All required parameters must be populated in order to send to Azure. + + :param node_agent_sku_id: Required. The ID of the Compute Node agent SKU + which the Image supports. + :type node_agent_sku_id: str + :param image_reference: Required. The reference to the Azure Virtual + Machine's Marketplace Image. + :type image_reference: ~azure.batch.models.ImageReference + :param os_type: Required. The type of operating system (e.g. Windows or + Linux) of the Image. 
Possible values include: 'linux', 'windows' + :type os_type: str or ~azure.batch.models.OSType + :param capabilities: The capabilities or features which the Image + supports. Not every capability of the Image is listed. Capabilities in + this list are considered of special interest and are generally related to + integration with other features in the Azure Batch service. + :type capabilities: list[str] + :param batch_support_end_of_life: The time when the Azure Batch service + will stop accepting create Pool requests for the Image. + :type batch_support_end_of_life: datetime + :param verification_type: Required. Whether the Azure Batch service + actively verifies that the Image is compatible with the associated Compute + Node agent SKU. Possible values include: 'verified', 'unverified' + :type verification_type: str or ~azure.batch.models.VerificationType + """ + + _validation = { + 'node_agent_sku_id': {'required': True}, + 'image_reference': {'required': True}, + 'os_type': {'required': True}, + 'verification_type': {'required': True}, + } + + _attribute_map = { + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + 'capabilities': {'key': 'capabilities', 'type': '[str]'}, + 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, + 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, + } + + def __init__(self, *, node_agent_sku_id: str, image_reference, os_type, verification_type, capabilities=None, batch_support_end_of_life=None, **kwargs) -> None: + super(ImageInformation, self).__init__(**kwargs) + self.node_agent_sku_id = node_agent_sku_id + self.image_reference = image_reference + self.os_type = os_type + self.capabilities = capabilities + self.batch_support_end_of_life = batch_support_end_of_life + self.verification_type = verification_type diff --git 
a/azext/generated/sdk/batch/v2019_06_01/models/image_reference.py b/azext/generated/sdk/batch/v2019_06_01/models/image_reference.py new file mode 100644 index 00000000..f1e0bc88 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/image_reference.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace Image or a custom + Azure Virtual Machine Image. To get the list of all Azure Marketplace Image + references verified by Azure Batch, see the 'List supported Images' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + Image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + Image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace Image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + Image. A value of 'latest' can be specified to select the latest version + of an Image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + Virtual Machine Image. Computes Compute Nodes of the Pool will be created + using this custom Image. 
This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The Virtual Machine Image must be in the same region and subscription as + the Azure Batch Account. For information about the firewall settings for + the Batch Compute Node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ImageReference, self).__init__(**kwargs) + self.publisher = kwargs.get('publisher', None) + self.offer = kwargs.get('offer', None) + self.sku = kwargs.get('sku', None) + self.version = kwargs.get('version', None) + self.virtual_machine_image_id = kwargs.get('virtual_machine_image_id', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/image_reference_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/image_reference_py3.py new file mode 100644 index 00000000..e0039e25 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/image_reference_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace Image or a custom + Azure Virtual Machine Image. To get the list of all Azure Marketplace Image + references verified by Azure Batch, see the 'List supported Images' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + Image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + Image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace Image. For + example, 14.04.0-LTS or 2012-R2-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + Image. A value of 'latest' can be specified to select the latest version + of an Image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + Virtual Machine Image. Computes Compute Nodes of the Pool will be created + using this custom Image. This is of the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + This property is mutually exclusive with other ImageReference properties. + The Virtual Machine Image must be in the same region and subscription as + the Azure Batch Account. For information about the firewall settings for + the Batch Compute Node agent to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
+ :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None: + super(ImageReference, self).__init__(**kwargs) + self.publisher = publisher + self.offer = offer + self.sku = sku + self.version = version + self.virtual_machine_image_id = virtual_machine_image_id diff --git a/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint.py b/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint.py new file mode 100644 index 00000000..06fecdc7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the Compute + Node. 
+ :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the Compute Node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. + :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(InboundEndpoint, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.public_ip_address = kwargs.get('public_ip_address', None) + self.public_fqdn = kwargs.get('public_fqdn', None) + self.frontend_port = kwargs.get('frontend_port', None) + self.backend_port = kwargs.get('backend_port', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint_py3.py new file mode 100644 index 00000000..d8bdbd94 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/inbound_endpoint_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the Compute + Node. + :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the Compute Node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. 
+ :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None: + super(InboundEndpoint, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.public_ip_address = public_ip_address + self.public_fqdn = public_fqdn + self.frontend_port = frontend_port + self.backend_port = backend_port diff --git a/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool.py b/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool.py new file mode 100644 index 00000000..654db312 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT Pool that can be used to address specific ports on Compute + Nodes in a Batch Pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch Pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the Compute Node. This + must be unique within a Batch Pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a Pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. 
Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a Pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch Pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. + :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, **kwargs): + super(InboundNATPool, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.backend_port = kwargs.get('backend_port', None) + self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) + self.frontend_port_range_end = kwargs.get('frontend_port_range_end', 
None) + self.network_security_group_rules = kwargs.get('network_security_group_rules', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool_py3.py new file mode 100644 index 00000000..63f4a0b2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/inbound_nat_pool_py3.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT Pool that can be used to address specific ports on Compute + Nodes in a Batch Pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch Pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the Compute Node. This + must be unique within a Batch Pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. 
If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a Pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a Pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch Pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. 
+ :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None: + super(InboundNATPool, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.backend_port = backend_port + self.frontend_port_range_start = frontend_port_range_start + self.frontend_port_range_end = frontend_port_range_end + self.network_security_group_rules = network_security_group_rules diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_add_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_add_options.py new file mode 100644 index 00000000..bdcf7969 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_add_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_add_options_py3.py new file mode 100644 index 00000000..9633e748 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter.py new file mode 100644 index 00000000..23d3824e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch Job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. 
The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. If the Job does not specify a Job Manager Task, the + user must explicitly add Tasks to the Job. If the Job does specify a Job + Manager Task, the Batch service creates the Job Manager Task when the Job + is created, and will try to schedule the Job Manager Task before + scheduling other Tasks in the Job. The Job Manager Task's typical purpose + is to control and/or monitor Job execution, for example by deciding what + additional Tasks to run, determining when the work is complete, etc. + (However, a Job Manager Task is not restricted to these activities - it is + a fully-fledged Task in the system and perform whatever actions are + required for the Job.) For example, a Job Manager Task might download a + file specified as a parameter, analyze the contents of that file and + submit additional Tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. If a Job has a Job + Preparation Task, the Batch service will run the Job Preparation Task on a + Node before starting any Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. A Job Release Task cannot + be specified without also specifying a Job Preparation Task for the Job. 
+ The Batch service runs the Job Release Task on the Nodes that have run the + Job Preparation Task. The primary purpose of the Job Release Task is to + undo changes to Compute Nodes made by the Job Preparation Task. Example + activities include deleting local files, or shutting down services that + were started as part of Job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. Note that if a Job + contains no Tasks, then all Tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set + onAllTasksComplete to terminatejob once you have finished adding Tasks. + The default is noaction. Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. 
A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 
'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, **kwargs): + super(JobAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.metadata = kwargs.get('metadata', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.network_configuration = kwargs.get('network_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter_py3.py new file mode 100644 index 00000000..bd569ff1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_add_parameter_py3.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch Job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the Job within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. If the Job does not specify a Job Manager Task, the + user must explicitly add Tasks to the Job. If the Job does specify a Job + Manager Task, the Batch service creates the Job Manager Task when the Job + is created, and will try to schedule the Job Manager Task before + scheduling other Tasks in the Job. The Job Manager Task's typical purpose + is to control and/or monitor Job execution, for example by deciding what + additional Tasks to run, determining when the work is complete, etc. + (However, a Job Manager Task is not restricted to these activities - it is + a fully-fledged Task in the system and perform whatever actions are + required for the Job.) For example, a Job Manager Task might download a + file specified as a parameter, analyze the contents of that file and + submit additional Tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. 
If a Job has a Job + Preparation Task, the Batch service will run the Job Preparation Task on a + Node before starting any Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. A Job Release Task cannot + be specified without also specifying a Job Preparation Task for the Job. + The Batch service runs the Job Release Task on the Nodes that have run the + Job Preparation Task. The primary purpose of the Job Release Task is to + undo changes to Compute Nodes made by the Job Preparation Task. Example + activities include deleting local files, or shutting down services that + were started as part of Job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. Note that if a Job + contains no Tasks, then all Tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set + onAllTasksComplete to terminatejob once you have finished adding Tasks. + The default is noaction. 
Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the Job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None: + super(JobAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = 
on_all_tasks_complete + self.on_task_failure = on_task_failure + self.metadata = metadata + self.uses_task_dependencies = uses_task_dependencies + self.network_configuration = network_configuration diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_constraints.py b/azext/generated/sdk/batch/v2019_06_01/models/job_constraints.py new file mode 100644 index 00000000..e53257c1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_constraints.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a Job. + + :param max_wall_clock_time: The maximum elapsed time that the Job may run, + measured from the time the Job is created. If the Job does not complete + within the time limit, the Batch service terminates it and any Tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the Job may run. + :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a Task up + to 4 times (one initial try and 3 retries). 
If the maximum retry count is + 0, the Batch service does not retry Tasks. If the maximum retry count is + -1, the Batch service retries Tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_constraints_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_constraints_py3.py new file mode 100644 index 00000000..4dc5e9c8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_constraints_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a Job. + + :param max_wall_clock_time: The maximum elapsed time that the Job may run, + measured from the time the Job is created. If the Job does not complete + within the time limit, the Batch service terminates it and any Tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the Job may run. 
+ :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a Task up + to 4 times (one initial try and 3 retries). If the maximum retry count is + 0, the Batch service does not retry Tasks. If the maximum retry count is + -1, the Batch service retries Tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options.py new file mode 100644 index 00000000..a537b55e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options_py3.py new file mode 100644 index 00000000..821db0e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options.py new file mode 100644 index 00000000..c6694516 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options_py3.py new file mode 100644 index 00000000..4b077714 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter.py new file mode 100644 index 00000000..75f02201 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a Job. 
+ + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active Tasks associated + with the Job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, **kwargs): + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = kwargs.get('disable_tasks', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter_py3.py new file mode 100644 index 00000000..9fb96f22 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_disable_parameter_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a Job. + + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active Tasks associated + with the Job. 
Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, *, disable_tasks, **kwargs) -> None: + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = disable_tasks diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options.py new file mode 100644 index 00000000..182f2b04 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options_py3.py new file mode 100644 index 00000000..47695f37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information.py new file mode 100644 index 00000000..bca20293 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a Job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the Job. This is the time + at which the Job was created. + :type start_time: datetime + :param end_time: The completion time of the Job. This property is set only + if the Job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the Pool to which this Job is assigned. This + element contains the actual Pool where the Job is assigned. When you get + Job details from the service, they also contain a poolInfo element, which + contains the Pool configuration data from when the Job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the Job ran on an + auto Pool, and this property contains the ID of that auto Pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the Job. This property is not set if there was no error + starting the Job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the Job ended. + This property is set only if the Job is in the completed state. If the + Batch service terminates the Job, it sets the reason as follows: + JMComplete - the Job Manager Task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime + constraint. TerminateJobSchedule - the Job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete + attribute is set to terminatejob, and all Tasks in the Job are complete. 
+ TaskFailed - the Job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a Task in the Job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a Job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.pool_id = kwargs.get('pool_id', None) + self.scheduling_error = kwargs.get('scheduling_error', None) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information_py3.py new file mode 100644 index 00000000..0097b52f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_execution_information_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a Job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the Job. This is the time + at which the Job was created. + :type start_time: datetime + :param end_time: The completion time of the Job. This property is set only + if the Job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the Pool to which this Job is assigned. This + element contains the actual Pool where the Job is assigned. When you get + Job details from the service, they also contain a poolInfo element, which + contains the Pool configuration data from when the Job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the Job ran on an + auto Pool, and this property contains the ID of that auto Pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the Job. This property is not set if there was no error + starting the Job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the Job ended. + This property is set only if the Job is in the completed state. If the + Batch service terminates the Job, it sets the reason as follows: + JMComplete - the Job Manager Task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime + constraint. TerminateJobSchedule - the Job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete + attribute is set to terminatejob, and all Tasks in the Job are complete. 
+ TaskFailed - the Job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a Task in the Job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a Job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, start_time, end_time=None, pool_id: str=None, scheduling_error=None, terminate_reason: str=None, **kwargs) -> None: + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.pool_id = pool_id + self.scheduling_error = scheduling_error + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..a8f7e849 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..2092bbd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_options.py new file mode 100644 index 00000000..62d47959 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_options_py3.py new file mode 100644 index 00000000..9ed21fc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options.py new file mode 100644 index 00000000..603d79ce --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options_py3.py new file mode 100644 index 00000000..b109e59e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_get_task_counts_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options.py new file mode 100644 index 00000000..3d6a86bd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options_py3.py new file mode 100644 index 00000000..6b9b05e1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_from_job_schedule_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_options.py new file mode 100644 index 00000000..6d926dba --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_options_py3.py new file mode 100644 index 00000000..356f33fa --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options.py new file mode 100644 index 00000000..d6607e15 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options_py3.py new file mode 100644 index 00000000..a1332ba8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_list_preparation_and_release_task_status_options_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task.py b/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task.py new file mode 100644 index 00000000..fde7d416 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager Task. + + The Job Manager Task is automatically started when the Job is created. The + Batch service tries to schedule the Job Manager Task before any other Tasks + in the Job. 
When shrinking a Pool, the Batch service tries to preserve + Nodes where Job Manager Tasks are running for as long as possible (that is, + Compute Nodes running 'normal' Tasks are removed before Compute Nodes + running Job Manager Tasks). When a Job Manager Task fails and needs to be + restarted, the system tries to schedule it at the highest priority. If + there are no idle Compute Nodes available, the system may terminate one of + the running Tasks in the Pool and return it to the queue in order to make + room for the Job Manager Task to restart. Note that a Job Manager Task in + one Job does not have priority over Tasks in other Jobs. Across Jobs, only + Job level priorities are observed. For example, if a Job Manager in a + priority 0 Job needs to be restarted, it will not displace Tasks of a + priority 1 Job. Batch will retry Tasks when a recovery operation is + triggered on a Node. Examples of recovery operations include (but are not + limited to) when an unhealthy Node is rebooted or a Compute Node + disappeared due to host failure. Retries due to recovery operations are + independent of and are not counted against the maxTaskRetryCount. Even if + the maxTaskRetryCount is 0, an internal retry due to a recovery operation + may occur. Because of this, all Tasks should be idempotent. This means + Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is + to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + Task within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager Task. 
It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager Task runs. If the Pool that will run this Task has + containerConfiguration set, this must be set as well. If the Pool that + will run this Task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all Task environment variables are mapped + into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is a + maximum size for the list of resource files. 
When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager Task + signifies completion of the entire Job. If true, when the Job Manager Task + completes, the Batch service marks the Job as complete. If any Tasks are + still running at this time (other than Job Release), those Tasks are + terminated. If false, the completion of the Job Manager Task does not + affect the Job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the Job, or have a client or + user terminate the Job explicitly. An example of this is if the Job + Manager creates a set of Tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control Job lifetime, + and using the Job Manager Task only to create the Tasks for the Job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. 
+ :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager Task requires exclusive use + of the Compute Node where it runs. If true, no other Tasks will run on the + same Node for as long as the Job Manager is running. If false, other Tasks + can run simultaneously with the Job Manager on a Compute Node. The Job + Manager Task counts normally against the Compute Node's concurrent Task + limit, so this is only relevant if the Compute Node allows multiple + concurrent Tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of Application Packages that + the Batch service will deploy to the Compute Node before running the + command line. Application Packages are downloaded and deployed to a shared + directory, not the Task working directory. Therefore, if a referenced + Application Package is already on the Compute Node, and is up to date, + then it is not re-downloaded; the existing copy on the Compute Node is + used. If a referenced Application Package cannot be installed, for example + because the package has been deleted or because download failed, the Task + fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. 
For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager Task may run on a + low-priority Compute Node. The default value is true. + :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobManagerTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + 
self.environment_settings = kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.kill_job_on_completion = kwargs.get('kill_job_on_completion', None) + self.user_identity = kwargs.get('user_identity', None) + self.run_exclusive = kwargs.get('run_exclusive', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) + self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task_py3.py new file mode 100644 index 00000000..cbcb37aa --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_manager_task_py3.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager Task. + + The Job Manager Task is automatically started when the Job is created. The + Batch service tries to schedule the Job Manager Task before any other Tasks + in the Job. When shrinking a Pool, the Batch service tries to preserve + Nodes where Job Manager Tasks are running for as long as possible (that is, + Compute Nodes running 'normal' Tasks are removed before Compute Nodes + running Job Manager Tasks). 
When a Job Manager Task fails and needs to be + restarted, the system tries to schedule it at the highest priority. If + there are no idle Compute Nodes available, the system may terminate one of + the running Tasks in the Pool and return it to the queue in order to make + room for the Job Manager Task to restart. Note that a Job Manager Task in + one Job does not have priority over Tasks in other Jobs. Across Jobs, only + Job level priorities are observed. For example, if a Job Manager in a + priority 0 Job needs to be restarted, it will not displace Tasks of a + priority 1 Job. Batch will retry Tasks when a recovery operation is + triggered on a Node. Examples of recovery operations include (but are not + limited to) when an unhealthy Node is rebooted or a Compute Node + disappeared due to host failure. Retries due to recovery operations are + independent of and are not counted against the maxTaskRetryCount. Even if + the maxTaskRetryCount is 0, an internal retry due to a recovery operation + may occur. Because of this, all Tasks should be idempotent. This means + Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is + to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + Task within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager Task. It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager Task. 
+ The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager Task runs. If the Pool that will run this Task has + containerConfiguration set, this must be set as well. If the Pool that + will run this Task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all Task environment variables are mapped + into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
+ :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager Task + signifies completion of the entire Job. If true, when the Job Manager Task + completes, the Batch service marks the Job as complete. If any Tasks are + still running at this time (other than Job Release), those Tasks are + terminated. If false, the completion of the Job Manager Task does not + affect the Job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the Job, or have a client or + user terminate the Job explicitly. An example of this is if the Job + Manager creates a set of Tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control Job lifetime, + and using the Job Manager Task only to create the Tasks for the Job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager Task requires exclusive use + of the Compute Node where it runs. 
If true, no other Tasks will run on the + same Node for as long as the Job Manager is running. If false, other Tasks + can run simultaneously with the Job Manager on a Compute Node. The Job + Manager Task counts normally against the Compute Node's concurrent Task + limit, so this is only relevant if the Compute Node allows multiple + concurrent Tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of Application Packages that + the Batch service will deploy to the Compute Node before running the + command line. Application Packages are downloaded and deployed to a shared + directory, not the Task working directory. Therefore, if a referenced + Application Package is already on the Compute Node, and is up to date, + then it is not re-downloaded; the existing copy on the Compute Node is + used. If a referenced Application Package cannot be installed, for example + because the package has been deleted or because download failed, the Task + fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager Task may run on a + low-priority Compute Node. The default value is true. 
+ :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None: + super(JobManagerTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.constraints = constraints + self.kill_job_on_completion = 
kill_job_on_completion + self.user_identity = user_identity + self.run_exclusive = run_exclusive + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings + self.allow_low_priority_node = allow_low_priority_node diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration.py new file mode 100644 index 00000000..9a566c67 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the Job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which Compute Nodes running Tasks from the Job will join + for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same + region and subscription as the Azure Batch Account. The specified subnet + should have enough free IP addresses to accommodate the number of Compute + Nodes which will run Tasks from the Job. This can be up to the number of + Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' service principal + must have the 'Classic Virtual Machine Contributor' Role-Based Access + Control (RBAC) role for the specified VNet so that Azure Batch service can + schedule Tasks on the Nodes. This can be verified by checking if the + specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + If the specified VNet has any associated Network Security Groups (NSG), + then a few reserved system ports must be enabled for inbound communication + from the Azure Batch service. For Pools created with a Virtual Machine + configuration, enable ports 29876 and 29877, as well as port 22 for Linux + and port 3389 for Windows. Port 443 is also required to be open for + outbound connections for communications to Azure Storage. For more details + see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration_py3.py new file mode 100644 index 00000000..dfea024a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_network_configuration_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the Job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which Compute Nodes running Tasks from the Job will join + for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same + region and subscription as the Azure Batch Account. The specified subnet + should have enough free IP addresses to accommodate the number of Compute + Nodes which will run Tasks from the Job. This can be up to the number of + Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal + must have the 'Classic Virtual Machine Contributor' Role-Based Access + Control (RBAC) role for the specified VNet so that Azure Batch service can + schedule Tasks on the Nodes. This can be verified by checking if the + specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + If the specified VNet has any associated Network Security Groups (NSG), + then a few reserved system ports must be enabled for inbound communication + from the Azure Batch service. 
For Pools created with a Virtual Machine + configuration, enable ports 29876 and 29877, as well as port 22 for Linux + and port 3389 for Windows. Port 443 is also required to be open for + outbound connections for communications to Azure Storage. For more details + see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, *, subnet_id: str, **kwargs) -> None: + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options.py new file mode 100644 index 00000000..9fdbb4f3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options_py3.py new file mode 100644 index 00000000..586e381d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter.py new file mode 100644 index 00000000..fb4046f4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a Job. 
+ + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the Job is left unchanged. + :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic Job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the Job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The Pool on which the Batch service runs the Job's + Tasks. You may change the Pool for a Job only when the Job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + Job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto Pool has a poolLifetimeOption of Job. If omitted, the Job + continues to run on its current Pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, the existing Job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter_py3.py new file mode 100644 index 00000000..7bf9fc49 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_patch_parameter_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a Job. + + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the Job is left unchanged. 
+ :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic Job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the Job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The Pool on which the Batch service runs the Job's + Tasks. You may change the Pool for a Job only when the Job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + Job is not disabled. If you specify an autoPoolSpecification specification + in the poolInfo, only the keepAlive property can be updated, and then only + if the auto Pool has a poolLifetimeOption of Job. If omitted, the Job + continues to run on its current Pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, the existing Job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, priority: int=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None: + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = priority + self.on_all_tasks_complete = on_all_tasks_complete + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information.py new file mode 100644 index 00000000..21fd6c88 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release Tasks on a Compute Node. + + :param pool_id: The ID of the Pool containing the Compute Node to which + this entry refers. 
+ :type pool_id: str + :param node_id: The ID of the Compute Node to which this entry refers. + :type node_id: str + :param node_url: The URL of the Compute Node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation Task on this Compute Node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release Task on this Compute Node. This property is set + only if the Job Release Task has run on the Compute Node. + :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.node_url = kwargs.get('node_url', None) + self.job_preparation_task_execution_info = kwargs.get('job_preparation_task_execution_info', None) + self.job_release_task_execution_info = kwargs.get('job_release_task_execution_info', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_paged.py new file mode 100644 index 00000000..f1f7d3c4 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class JobPreparationAndReleaseTaskExecutionInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobPreparationAndReleaseTaskExecutionInformation <azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobPreparationAndReleaseTaskExecutionInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(JobPreparationAndReleaseTaskExecutionInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_py3.py new file mode 100644 index 00000000..8c998d37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_and_release_task_execution_information_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release Tasks on a Compute Node. + + :param pool_id: The ID of the Pool containing the Compute Node to which + this entry refers. + :type pool_id: str + :param node_id: The ID of the Compute Node to which this entry refers. + :type node_id: str + :param node_url: The URL of the Compute Node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation Task on this Compute Node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release Task on this Compute Node. This property is set + only if the Job Release Task has run on the Compute Node. 
+ :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, *, pool_id: str=None, node_id: str=None, node_url: str=None, job_preparation_task_execution_info=None, job_release_task_execution_info=None, **kwargs) -> None: + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.node_id = node_id + self.node_url = node_url + self.job_preparation_task_execution_info = job_preparation_task_execution_info + self.job_release_task_execution_info = job_release_task_execution_info diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task.py new file mode 100644 index 00000000..158747f4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation Task to run before any Tasks of the Job on any given + Compute Node. 
+ + You can use Job Preparation to prepare a Node to run Tasks for the Job. + Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the Tasks in the Job. The Job Preparation + Task can download these common resource files to the shared location on the + Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the + Node so that all Tasks of that Job can communicate with it. If the Job + Preparation Task fails (that is, exhausts its retry count before exiting + with exit code 0), Batch will not run Tasks of this Job on the Node. The + Compute Node remains ineligible to run Tasks of this Job until it is + reimaged. The Compute Node remains active and can be used for other Jobs. + The Job Preparation Task can run multiple times on the same Node. + Therefore, you should write the Job Preparation Task to handle + re-execution. If the Node is rebooted, the Job Preparation Task is run + again on the Compute Node before scheduling any other Task of the Job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did + not previously complete. If the Node is reimaged, the Job Preparation Task + is run again before scheduling any Task of the Job. Batch will retry Tasks + when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is + rebooted or a Compute Node disappeared due to host failure. Retries due to + recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry + due to a recovery operation may occur. Because of this, all Tasks should be + idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best + practice for long running Tasks is to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. 
+ + :param id: A string that uniquely identifies the Job Preparation Task + within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other Task in the Job can + have the same ID as the Job Preparation Task. If you try to submit a Task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + Task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation Task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command + line is executed in the container. Files produced in the container outside + of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. 
+ :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation Task to complete successfully before scheduling any other + Tasks of the Job on the Compute Node. A Job Preparation Task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + Task fails on a Node, the Batch service retries the Job Preparation Task + up to its maximum retry count (as specified in the constraints element). + If the Task has still not completed successfully after all retries, then + the Batch service will not schedule Tasks of the Job to the Node. The Node + remains active and eligible to run Tasks of other Jobs. If false, the + Batch service will not wait for the Job Preparation Task to complete. In + this case, other Tasks of the Job can start executing on the Compute Node + while the Job Preparation Task is still running; and even if the Job + Preparation Task fails, new Tasks will continue to be scheduled on the + Compute Node. 
The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + Task runs. If omitted, the Task runs as a non-administrative user unique + to the Task on Windows Compute Nodes, or a non-administrative user unique + to the Pool on Linux Compute Nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation Task after a Compute Node reboots. The + Job Preparation Task is always rerun if a Compute Node is reimaged, or if + the Job Preparation Task did not complete (e.g. because the reboot + occurred while the Task was running). Therefore, you should always write a + Job Preparation Task to be idempotent and to behave correctly if run + multiple times. The default value is true. + :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = 
kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.wait_for_success = kwargs.get('wait_for_success', None) + self.user_identity = kwargs.get('user_identity', None) + self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information.py new file mode 100644 index 00000000..3e1edeff --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation Task on a + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Preparation Task completed. + This property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation Task on + the Compute Node. 
Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation Task + on the Compute Node. You can use this path to retrieve files created by + the Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. 
Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will + retry the Task up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation Task started running. This property is set only if the Task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the Task has been restarted + for reasons other than retry; for example, if the Compute Node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTaskExecutionInformation, 
self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information_py3.py new file mode 100644 index 00000000..373d5b23 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_execution_information_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation Task on a + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. 
+ :type start_time: datetime + :param end_time: The time at which the Job Preparation Task completed. + This property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation Task on + the Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation Task + on the Compute Node. You can use this path to retrieve files created by + the Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. 
Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation Task started running. This property is set only if the Task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the Task has been restarted + for reasons other than retry; for example, if the Compute Node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, retry_count: int, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_py3.py new file mode 100644 index 00000000..53dc875d --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/job_preparation_task_py3.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation Task to run before any Tasks of the Job on any given + Compute Node. + + You can use Job Preparation to prepare a Node to run Tasks for the Job. + Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the Tasks in the Job. The Job Preparation + Task can download these common resource files to the shared location on the + Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the + Node so that all Tasks of that Job can communicate with it. If the Job + Preparation Task fails (that is, exhausts its retry count before exiting + with exit code 0), Batch will not run Tasks of this Job on the Node. The + Compute Node remains ineligible to run Tasks of this Job until it is + reimaged. The Compute Node remains active and can be used for other Jobs. + The Job Preparation Task can run multiple times on the same Node. + Therefore, you should write the Job Preparation Task to handle + re-execution. If the Node is rebooted, the Job Preparation Task is run + again on the Compute Node before scheduling any other Task of the Job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did + not previously complete. If the Node is reimaged, the Job Preparation Task + is run again before scheduling any Task of the Job. 
Batch will retry Tasks + when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is + rebooted or a Compute Node disappeared due to host failure. Retries due to + recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry + due to a recovery operation may occur. Because of this, all Tasks should be + idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best + practice for long running Tasks is to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation Task + within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other Task in the Job can + have the same ID as the Job Preparation Task. If you try to submit a Task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + Task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation Task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command + line is executed in the container. Files produced in the container outside + of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation Task. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation Task to complete successfully before scheduling any other + Tasks of the Job on the Compute Node. A Job Preparation Task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + Task fails on a Node, the Batch service retries the Job Preparation Task + up to its maximum retry count (as specified in the constraints element). + If the Task has still not completed successfully after all retries, then + the Batch service will not schedule Tasks of the Job to the Node. The Node + remains active and eligible to run Tasks of other Jobs. If false, the + Batch service will not wait for the Job Preparation Task to complete. In + this case, other Tasks of the Job can start executing on the Compute Node + while the Job Preparation Task is still running; and even if the Job + Preparation Task fails, new Tasks will continue to be scheduled on the + Compute Node. The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + Task runs. If omitted, the Task runs as a non-administrative user unique + to the Task on Windows Compute Nodes, or a non-administrative user unique + to the Pool on Linux Compute Nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation Task after a Compute Node reboots. The + Job Preparation Task is always rerun if a Compute Node is reimaged, or if + the Job Preparation Task did not complete (e.g. because the reboot + occurred while the Task was running). Therefore, you should always write a + Job Preparation Task to be idempotent and to behave correctly if run + multiple times. The default value is true. 
+ :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None: + super(JobPreparationTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.constraints = constraints + self.wait_for_success = wait_for_success + self.user_identity = user_identity + self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_release_task.py b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task.py new file mode 100644 index 00000000..b68410a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release Task to run on Job completion on any Compute Node where the + Job has run. + + The Job Release Task runs when the Job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the Job is still active, the Job's maximum wall clock time constraint + is reached, and the Job is still active, or the Job's Job Manager Task + completed, and the Job is configured to terminate when the Job Manager + completes. The Job Release Task runs on each Node where Tasks of the Job + have run and the Job Preparation Task ran and completed. If you reimage a + Node after it has run the Job Preparation Task, and the Job ends without + any further Tasks of the Job running on that Node (and hence the Job + Preparation Task does not re-run), then the Job Release Task does not run + on that Compute Node. If a Node reboots while the Job Release Task is still + running, the Job Release Task runs again when the Compute Node starts up. + The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy + a scheduling slot; that is, it does not count towards the maxTasksPerNode + limit specified on the Pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release Task within + the Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. 
If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. No other Task in the Job can have the + same ID as the Job Release Task. If you try to submit a Task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release Task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. 
When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + Task may run on a given Compute Node, measured from the time the Task + starts. If the Task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory for + the Job Release Task on the Compute Node. After this time, the Batch + service may delete the Task directory and all its contents. The default is + 7 days, i.e. the Task directory will be retained for 7 days unless the + Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.user_identity = kwargs.get('user_identity', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information.py new file mode 100644 index 00000000..0cb6fc16 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release Task on a Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release Task completed. This + property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release Task on the + Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release Task on + the Compute Node. You can use this path to retrieve files created by the + Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. 
+ :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + 
self.failure_info = kwargs.get('failure_info', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information_py3.py new file mode 100644 index 00000000..f83c2895 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_execution_information_py3.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release Task on a Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release Task completed. This + property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release Task on the + Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release Task on + the Compute Node. You can use this path to retrieve files created by the + Task, such as log files. 
+ :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, result=None, **kwargs) -> None: + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.result = result diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_py3.py new file mode 100644 index 00000000..70e7615c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_release_task_py3.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release Task to run on Job completion on any Compute Node where the + Job has run. + + The Job Release Task runs when the Job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the Job is still active, the Job's maximum wall clock time constraint + is reached, and the Job is still active, or the Job's Job Manager Task + completed, and the Job is configured to terminate when the Job Manager + completes. The Job Release Task runs on each Node where Tasks of the Job + have run and the Job Preparation Task ran and completed. If you reimage a + Node after it has run the Job Preparation Task, and the Job ends without + any further Tasks of the Job running on that Node (and hence the Job + Preparation Task does not re-run), then the Job Release Task does not run + on that Compute Node. If a Node reboots while the Job Release Task is still + running, the Job Release Task runs again when the Compute Node starts up. + The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy + a scheduling slot; that is, it does not count towards the maxTasksPerNode + limit specified on the Pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release Task within + the Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. 
No other Task in the Job can have the + same ID as the Job Release Task. If you try to submit a Task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release Task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. 
If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + Task may run on a given Compute Node, measured from the time the Task + starts. If the Task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory for + the Job Release Task on the Compute Node. After this time, the Batch + service may delete the Task directory and all its contents. The default is + 7 days, i.e. the Task directory will be retained for 7 days unless the + Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None: + super(JobReleaseTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.user_identity = user_identity diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options.py new file mode 100644 index 00000000..6c03aaff --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options_py3.py new file mode 100644 index 00000000..fe7b76cc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter.py new file mode 100644 index 00000000..3a38b677 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the schedule within + the Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which Jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the Jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter_py3.py new file mode 100644 index 00000000..fe16af9b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_add_parameter_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the schedule within + the Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which Jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the Jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None: + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options.py new file mode 100644 index 00000000..a7e01118 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options_py3.py new file mode 100644 index 00000000..89ae9986 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options.py new file mode 100644 index 00000000..9384c1fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options_py3.py new file mode 100644 index 00000000..83adbe53 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options.py new file mode 100644 index 00000000..a296d530 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options_py3.py new file mode 100644 index 00000000..daa4d087 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information.py new file mode 100644 index 00000000..1e38ec3d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about Jobs that have been and will be run under a Job + Schedule. + + :param next_run_time: The next time at which a Job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no Job will be created at nextRunTime unless the Job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent Job under the Job + Schedule. This property is present only if the at least one Job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the Job Schedule is in the completed state. + :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = kwargs.get('next_run_time', None) + self.recent_job = kwargs.get('recent_job', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information_py3.py new file mode 100644 index 00000000..bfc1afc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_execution_information_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about Jobs that have been and will be run under a Job + Schedule. + + :param next_run_time: The next time at which a Job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no Job will be created at nextRunTime unless the Job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent Job under the Job + Schedule. This property is present only if the at least one Job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the Job Schedule is in the completed state. 
+ :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, next_run_time=None, recent_job=None, end_time=None, **kwargs) -> None: + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = next_run_time + self.recent_job = recent_job + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options.py new file mode 100644 index 00000000..c4f228d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options_py3.py new file mode 100644 index 00000000..da8e15d2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options.py new file mode 100644 index 00000000..434b0ab1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options_py3.py new file mode 100644 index 00000000..11ee540f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options.py new file mode 100644 index 00000000..4778b41e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Job Schedules can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options_py3.py new file mode 100644 index 00000000..834a0af2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Job Schedules can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options.py new file mode 100644 index 00000000..841e56e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobSchedulePatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options_py3.py new file mode 100644 index 00000000..06e4f626 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobSchedulePatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter.py new file mode 100644 index 00000000..f0cbdf2f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchParameter(Model): + """The set of changes to be made to a Job Schedule. + + :param schedule: The schedule according to which Jobs will be created. If + you do not specify this element, the existing schedule is left unchanged. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. Updates affect only Jobs that are started after the update has + taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, existing + metadata is left unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobSchedulePatchParameter, self).__init__(**kwargs) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter_py3.py new file mode 100644 index 00000000..a9694ade --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_patch_parameter_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchParameter(Model): + """The set of changes to be made to a Job Schedule. + + :param schedule: The schedule according to which Jobs will be created. If + you do not specify this element, the existing schedule is left unchanged. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. Updates affect only Jobs that are started after the update has + taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, existing + metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None: + super(JobSchedulePatchParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics.py new file mode 100644 index 00000000..ea33b382 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. 
The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + Tasks in all the Jobs created under the schedule. The wall clock time is + the elapsed time from when the Task started running on a Compute Node to + when it finished (or to the last time the statistics were updated, if the + Task had not finished by then). If a Task was retried, this includes the + wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in all Jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in all Jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + Tasks in all Jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + Tasks in all Jobs created under the schedule. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed during the given time range in Jobs created under + the schedule. A Task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks that failed + during the given time range in Jobs created under the schedule. A Task + fails if it exhausts its maximum retry count without returning exit code + 0. 
+ :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all Tasks in all Jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in all Jobs + created under the schedule. The wait time for a Task is defined as the + elapsed time between the creation of the Task and the start of Task + execution. (If the Task is retried due to failures, the wait time is the + time to the most recent Task execution.). This value is only reported in + the Account lifetime statistics; it is not included in the Job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, 
+ 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics_py3.py new file mode 100644 index 00000000..15bdc089 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_statistics_py3.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a Job Schedule. 
+ + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + Tasks in all the Jobs created under the schedule. The wall clock time is + the elapsed time from when the Task started running on a Compute Node to + when it finished (or to the last time the statistics were updated, if the + Task had not finished by then). If a Task was retried, this includes the + wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in all Jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in all Jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + Tasks in all Jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + Tasks in all Jobs created under the schedule. 
+ :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed during the given time range in Jobs created under + the schedule. A Task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks that failed + during the given time range in Jobs created under the schedule. A Task + fails if it exhausts its maximum retry count without returning exit code + 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all Tasks in all Jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in all Jobs + created under the schedule. The wait time for a Task is defined as the + elapsed time between the creation of the Task and the start of Task + execution. (If the Task is retried due to failures, the wait time is the + time to the most recent Task execution.). This value is only reported in + the Account lifetime statistics; it is not included in the Job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + 
self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options.py new file mode 100644 index 00000000..32a6f0d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options_py3.py new file mode 100644 index 00000000..54789876 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options.py new file mode 100644 index 00000000..ca3de898 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options_py3.py new file mode 100644 index 00000000..aee92988 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter.py new file mode 100644 index 00000000..bb01f620 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateParameter(Model): + """The set of changes to be made to a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule: Required. The schedule according to which Jobs will be + created. If you do not specify this element, it is equivalent to passing + the default schedule: that is, a single Job scheduled to run immediately. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. Details of the Jobs to be created on + this schedule. Updates affect only Jobs that are started after the update + has taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, it takes the + default value of an empty list; in effect, any existing metadata is + deleted. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleUpdateParameter, self).__init__(**kwargs) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter_py3.py new file mode 100644 index 00000000..40195e0f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_schedule_update_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateParameter(Model): + """The set of changes to be made to a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule: Required. The schedule according to which Jobs will be + created. If you do not specify this element, it is equivalent to passing + the default schedule: that is, a single Job scheduled to run immediately. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. 
Details of the Jobs to be created on + this schedule. Updates affect only Jobs that are started after the update + has taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, it takes the + default value of an empty list; in effect, any existing metadata is + deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule, job_specification, metadata=None, **kwargs) -> None: + super(JobScheduleUpdateParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error.py b/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error.py new file mode 100644 index 00000000..ff82e783 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulingError(Model): + """An error encountered by the Batch service when scheduling a Job. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Job scheduling error. + Possible values include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Job scheduling error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Job scheduling error, intended to + be suitable for display in a user interface. + :type message: str + :param details: A list of additional error details related to the + scheduling error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(JobSchedulingError, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error_py3.py new file mode 100644 index 00000000..2d635a17 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_scheduling_error_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulingError(Model): + """An error encountered by the Batch service when scheduling a Job. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Job scheduling error. + Possible values include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Job scheduling error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Job scheduling error, intended to + be suitable for display in a user interface. + :type message: str + :param details: A list of additional error details related to the + scheduling error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(JobSchedulingError, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_specification.py b/azext/generated/sdk/batch/v2019_06_01/models/job_specification.py new file mode 100644 index 00000000..5cbe16fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_specification.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSpecification(Model): + """Specifies details of the Jobs to be created on a schedule. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of Jobs created under this schedule. + Priority values can range from -1000 to 1000, with -1000 being the lowest + priority and 1000 being the highest priority. The default value is 0. This + priority is used as the default for all Jobs under the Job Schedule. You + can update a Job's priority after it has been created using by using the + update Job API. 
+ :type priority: int + :param display_name: The display name for Jobs created under this + schedule. The name need not be unique and can contain any Unicode + characters up to a maximum length of 1024. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in a Job created under this schedule are in the completed + state. Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic Job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the Job + properties to set onAllTasksComplete to terminatejob once you have + finished adding Tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task fails in a Job created under this schedule. A Task is considered to + have failed if it have failed if has a failureInfo. A failureInfo is set + if the Task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the Task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param constraints: The execution constraints for Jobs created under this + schedule. 
+ :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager Task to be launched + when a Job is started under this schedule. If the Job does not specify a + Job Manager Task, the user must explicitly add Tasks to the Job using the + Task API. If the Job does specify a Job Manager Task, the Batch service + creates the Job Manager Task when the Job is created, and will try to + schedule the Job Manager Task before scheduling other Tasks in the Job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task for Jobs created + under this schedule. If a Job has a Job Preparation Task, the Batch + service will run the Job Preparation Task on a Node before starting any + Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task for Jobs created under this + schedule. The primary purpose of the Job Release Task is to undo changes + to Nodes made by the Job Preparation Task. Example activities include + deleting local files, or shutting down services that were started as part + of Job preparation. A Job Release Task cannot be specified without also + specifying a Job Preparation Task for the Job. The Batch service runs the + Job Release Task on the Compute Nodes that have run the Job Preparation + Task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all Tasks in Jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release Tasks). Individual Tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. 
The Pool on which the Batch service runs the + Tasks of Jobs created under this schedule. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each Job + created under this schedule as metadata. The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobSpecification, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.constraints = kwargs.get('constraints', None) 
+ self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_specification_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_specification_py3.py new file mode 100644 index 00000000..b9793eea --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_specification_py3.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSpecification(Model): + """Specifies details of the Jobs to be created on a schedule. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of Jobs created under this schedule. + Priority values can range from -1000 to 1000, with -1000 being the lowest + priority and 1000 being the highest priority. The default value is 0. This + priority is used as the default for all Jobs under the Job Schedule. You + can update a Job's priority after it has been created using by using the + update Job API. + :type priority: int + :param display_name: The display name for Jobs created under this + schedule. The name need not be unique and can contain any Unicode + characters up to a maximum length of 1024. 
+ :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in a Job created under this schedule are in the completed + state. Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic Job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the Job + properties to set onAllTasksComplete to terminatejob once you have + finished adding Tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task fails in a Job created under this schedule. A Task is considered to + have failed if it have failed if has a failureInfo. A failureInfo is set + if the Task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the Task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param constraints: The execution constraints for Jobs created under this + schedule. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager Task to be launched + when a Job is started under this schedule. If the Job does not specify a + Job Manager Task, the user must explicitly add Tasks to the Job using the + Task API. 
If the Job does specify a Job Manager Task, the Batch service + creates the Job Manager Task when the Job is created, and will try to + schedule the Job Manager Task before scheduling other Tasks in the Job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task for Jobs created + under this schedule. If a Job has a Job Preparation Task, the Batch + service will run the Job Preparation Task on a Node before starting any + Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task for Jobs created under this + schedule. The primary purpose of the Job Release Task is to undo changes + to Nodes made by the Job Preparation Task. Example activities include + deleting local files, or shutting down services that were started as part + of Job preparation. A Job Release Task cannot be specified without also + specifying a Job Preparation Task for the Job. The Batch service runs the + Job Release Task on the Compute Nodes that have run the Job Preparation + Task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all Tasks in Jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release Tasks). Individual Tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Tasks of Jobs created under this schedule. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each Job + created under this schedule as metadata. 
The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: + super(JobSpecification, self).__init__(**kwargs) + self.priority = priority + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.network_configuration = network_configuration + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = 
job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/job_statistics.py new file mode 100644 index 00000000..db6d382d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_statistics.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a Job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all Tasks + in the Job. 
The wall clock time is the elapsed time from when the Task + started running on a Compute Node to when it finished (or to the last time + the statistics were updated, if the Task had not finished by then). If a + Task was retried, this includes the wall clock time of all the Task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in the Job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in the Job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all Tasks in the Job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all Tasks in the Job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed in the Job during the given time range. A Task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks in the Job + that failed during the given time range. A Task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + Tasks in the Job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in the Job. + The wait time for a Task is defined as the elapsed time between the + creation of the Task and the start of Task execution. (If the Task is + retried due to failures, the wait time is the time to the most recent Task + execution.) This value is only reported in the Account lifetime + statistics; it is not included in the Job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + 
self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_statistics_py3.py new file mode 100644 index 00000000..c12c785f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_statistics_py3.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a Job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. 
The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all Tasks + in the Job. The wall clock time is the elapsed time from when the Task + started running on a Compute Node to when it finished (or to the last time + the statistics were updated, if the Task had not finished by then). If a + Task was retried, this includes the wall clock time of all the Task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in the Job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in the Job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all Tasks in the Job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all Tasks in the Job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed in the Job during the given time range. A Task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks in the Job + that failed during the given time range. A Task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + Tasks in the Job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in the Job. + The wait time for a Task is defined as the elapsed time between the + creation of the Task and the start of Task execution. 
(If the Task is + retried due to failures, the wait time is the time to the most recent Task + execution.) This value is only reported in the Account lifetime + statistics; it is not included in the Job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobStatistics, 
self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options.py new file mode 100644 index 00000000..b858c404 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options_py3.py new file mode 100644 index 00000000..77173bcc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter.py new file mode 100644 index 00000000..ac909e55 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a Job. 
+ + :param terminate_reason: The text you want to appear as the Job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter_py3.py new file mode 100644 index 00000000..d468786e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_terminate_parameter_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a Job. + + :param terminate_reason: The text you want to appear as the Job's + TerminateReason. The default is 'UserTerminate'. 
+ :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, terminate_reason: str=None, **kwargs) -> None: + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_update_options.py b/azext/generated/sdk/batch/v2019_06_01/models/job_update_options.py new file mode 100644 index 00000000..a11f18ab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_update_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_update_options_py3.py new file mode 100644 index 00000000..61a47c21 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter.py new file mode 100644 index 00000000..c0e4686e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a Job. 
+ + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the Job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. You may change the Pool for a Job only when the Job is + disabled. The Update Job call will fail if you include the poolInfo + element and the Job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto Pool has a + poolLifetimeOption of Job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a Job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + Job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, **kwargs): + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter_py3.py new file mode 100644 index 00000000..c1c5b826 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/job_update_parameter_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a Job. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the Job. 
Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the Job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. You may change the Pool for a Job only when the Job is + disabled. The Update Job call will fail if you include the poolInfo + element and the Job is not disabled. If you specify an + autoPoolSpecification specification in the poolInfo, only the keepAlive + property can be updated, and then only if the auto Pool has a + poolLifetimeOption of Job. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a Job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + Job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. 
Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, *, pool_info, priority: int=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None: + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = priority + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata + self.on_all_tasks_complete = on_all_tasks_complete diff --git a/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration.py new file mode 100644 index 00000000..25f2768f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user Account on a Linux Compute Node. + + :param uid: The user ID of the user Account. The uid and gid properties + must be specified together or not at all. 
If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user Account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between Compute Nodes in a Linux Pool when the Pool's + enableInterNodeCommunication property is true (it is ignored if + enableInterNodeCommunication is false). It does this by placing the key + pair into the user's .ssh directory. If not specified, password-less SSH + is not configured between Compute Nodes (no modification of the user's + .ssh directory is done). + :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = kwargs.get('uid', None) + self.gid = kwargs.get('gid', None) + self.ssh_private_key = kwargs.get('ssh_private_key', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration_py3.py new file mode 100644 index 00000000..5025ea07 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/linux_user_configuration_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user Account on a Linux Compute Node. + + :param uid: The user ID of the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user Account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between Compute Nodes in a Linux Pool when the Pool's + enableInterNodeCommunication property is true (it is ignored if + enableInterNodeCommunication is false). It does this by placing the key + pair into the user's .ssh directory. If not specified, password-less SSH + is not configured between Compute Nodes (no modification of the user's + .ssh directory is done). 
+ :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = uid + self.gid = gid + self.ssh_private_key = ssh_private_key diff --git a/azext/generated/sdk/batch/v2019_06_01/models/metadata_item.py b/azext/generated/sdk/batch/v2019_06_01/models/metadata_item.py new file mode 100644 index 00000000..d1d203e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/metadata_item.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(MetadataItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/metadata_item_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/metadata_item_py3.py new file mode 100644 index 00000000..3d127cd1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/metadata_item_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str, **kwargs) -> None: + super(MetadataItem, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings.py b/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings.py new file mode 100644 index 00000000..d88a46e1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance Task. + + Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance Task fails. The + multi-instance Task is then terminated and retried, up to its retry limit. + + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of Compute Nodes required by the + Task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. 
The command line to run on all + the Compute Nodes to enable them to coordinate when the primary runs the + main Task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and Task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas Task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the Task working + directory, but instead are downloaded to the Task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, **kwargs): + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = kwargs.get('number_of_instances', None) + self.coordination_command_line = kwargs.get('coordination_command_line', None) + self.common_resource_files = kwargs.get('common_resource_files', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings_py3.py new file mode 100644 index 00000000..347a46ea --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/multi_instance_settings_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance Task. + + Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance Task fails. The + multi-instance Task is then terminated and retried, up to its retry limit. 
+ + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of Compute Nodes required by the + Task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the Compute Nodes to enable them to coordinate when the primary runs the + main Task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and Task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas Task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the Task working + directory, but instead are downloaded to the Task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None: + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = number_of_instances + self.coordination_command_line = coordination_command_line + self.common_resource_files = common_resource_files diff --git a/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair.py b/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair.py new file mode 100644 index 00000000..d2775a33 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NameValuePair, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair_py3.py new file mode 100644 index 00000000..9e508e56 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/name_value_pair_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None: + super(NameValuePair, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/network_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/network_configuration.py new file mode 100644 index 00000000..93fcbaec --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/network_configuration.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a Pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the Compute Nodes of the Pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch Account. The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes in the Pool. If the + subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. 
The + specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking + if the specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. For Pools created with virtualMachineConfiguration only ARM + virtual networks ('Microsoft.Network/virtualNetworks') are supported, but + for Pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For Pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For Pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on Compute + Nodes in the Batch Pool. Pool endpoint configuration is only supported on + Pools with the virtualMachineConfiguration property. 
+ :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, **kwargs): + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) + self.dynamic_vnet_assignment_scope = kwargs.get('dynamic_vnet_assignment_scope', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/network_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/network_configuration_py3.py new file mode 100644 index 00000000..3fd3f8d6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/network_configuration_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a Pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the Compute Nodes of the Pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch Account. 
The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes in the Pool. If the + subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. The + specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking + if the specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. For Pools created with virtualMachineConfiguration only ARM + virtual networks ('Microsoft.Network/virtualNetworks') are supported, but + for Pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For Pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For Pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on Compute + Nodes in the Batch Pool. 
Pool endpoint configuration is only supported on + Pools with the virtualMachineConfiguration property. + :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + } + + def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, **kwargs) -> None: + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id + self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope + self.endpoint_configuration = endpoint_configuration diff --git a/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule.py b/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule.py new file mode 100644 index 00000000..fe3fd6a8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + Pool must be unique and are evaluated in order of priority. 
The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + :param source_port_ranges: The source port ranges to match for the rule. + Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), + or a port range (i.e. 100-200). The ports must be in the range of 0 to + 65535. Each entry in this collection must not overlap any other entry + (either a range or an individual port). If any other values are provided + the request fails with HTTP status code 400. The default value is '*'. 
+ :type source_port_ranges: list[str] + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.access = kwargs.get('access', None) + self.source_address_prefix = kwargs.get('source_address_prefix', None) + self.source_port_ranges = kwargs.get('source_port_ranges', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule_py3.py new file mode 100644 index 00000000..bed8fb49 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/network_security_group_rule_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + Pool must be unique and are evaluated in order of priority. 
The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + :param source_port_ranges: The source port ranges to match for the rule. + Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), + or a port range (i.e. 100-200). The ports must be in the range of 0 to + 65535. Each entry in this collection must not overlap any other entry + (either a range or an individual port). If any other values are provided + the request fails with HTTP status code 400. The default value is '*'. 
+ :type source_port_ranges: list[str] + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, + } + + def __init__(self, *, priority: int, access, source_address_prefix: str, source_port_ranges=None, **kwargs) -> None: + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = priority + self.access = access + self.source_address_prefix = source_address_prefix + self.source_port_ranges = source_port_ranges diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information.py b/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information.py new file mode 100644 index 00000000..d0ec5f5f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the Compute Node agent. + + The Batch Compute Node agent is a program that runs on each Compute Node in + the Pool and provides Batch capability on the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. 
The version of the Batch Compute Node agent + running on the Compute Node. This version number can be checked against + the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the Compute Node agent + was updated on the Compute Node. This is the most recent time that the + Compute Node agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.last_update_time = kwargs.get('last_update_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information_py3.py new file mode 100644 index 00000000..2be72956 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_agent_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the Compute Node agent. 
+ + The Batch Compute Node agent is a program that runs on each Compute Node in + the Pool and provides Batch capability on the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of the Batch Compute Node agent + running on the Compute Node. This version number can be checked against + the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the Compute Node agent + was updated on the Compute Node. This is the most recent time that the + Compute Node agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, version: str, last_update_time, **kwargs) -> None: + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = version + self.last_update_time = last_update_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_counts.py b/azext/generated/sdk/batch/v2019_06_01/models/node_counts.py new file mode 100644 index 00000000..8cb93d4a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_counts.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of Compute Nodes in each Compute Node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of Compute Nodes in the creating + state. + :type creating: int + :param idle: Required. The number of Compute Nodes in the idle state. + :type idle: int + :param offline: Required. The number of Compute Nodes in the offline + state. + :type offline: int + :param preempted: Required. The number of Compute Nodes in the preempted + state. + :type preempted: int + :param rebooting: Required. The count of Compute Nodes in the rebooting + state. + :type rebooting: int + :param reimaging: Required. The number of Compute Nodes in the reimaging + state. + :type reimaging: int + :param running: Required. The number of Compute Nodes in the running + state. + :type running: int + :param starting: Required. The number of Compute Nodes in the starting + state. + :type starting: int + :param start_task_failed: Required. The number of Compute Nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of Compute Nodes in the + leavingPool state. + :type leaving_pool: int + :param unknown: Required. The number of Compute Nodes in the unknown + state. + :type unknown: int + :param unusable: Required. The number of Compute Nodes in the unusable + state. + :type unusable: int + :param waiting_for_start_task: Required. The number of Compute Nodes in + the waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of Compute Nodes. 
+ :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(NodeCounts, self).__init__(**kwargs) + self.creating = kwargs.get('creating', None) + self.idle = kwargs.get('idle', None) + self.offline = kwargs.get('offline', None) + self.preempted = kwargs.get('preempted', None) + self.rebooting = kwargs.get('rebooting', None) + self.reimaging = kwargs.get('reimaging', None) + self.running = kwargs.get('running', None) + self.starting = kwargs.get('starting', None) + self.start_task_failed = kwargs.get('start_task_failed', None) + self.leaving_pool = kwargs.get('leaving_pool', None) + self.unknown = kwargs.get('unknown', None) + self.unusable = kwargs.get('unusable', None) + self.waiting_for_start_task = 
kwargs.get('waiting_for_start_task', None) + self.total = kwargs.get('total', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_counts_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_counts_py3.py new file mode 100644 index 00000000..5080115b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_counts_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of Compute Nodes in each Compute Node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of Compute Nodes in the creating + state. + :type creating: int + :param idle: Required. The number of Compute Nodes in the idle state. + :type idle: int + :param offline: Required. The number of Compute Nodes in the offline + state. + :type offline: int + :param preempted: Required. The number of Compute Nodes in the preempted + state. + :type preempted: int + :param rebooting: Required. The count of Compute Nodes in the rebooting + state. + :type rebooting: int + :param reimaging: Required. The number of Compute Nodes in the reimaging + state. + :type reimaging: int + :param running: Required. The number of Compute Nodes in the running + state. + :type running: int + :param starting: Required. The number of Compute Nodes in the starting + state. + :type starting: int + :param start_task_failed: Required. The number of Compute Nodes in the + startTaskFailed state. 
+ :type start_task_failed: int + :param leaving_pool: Required. The number of Compute Nodes in the + leavingPool state. + :type leaving_pool: int + :param unknown: Required. The number of Compute Nodes in the unknown + state. + :type unknown: int + :param unusable: Required. The number of Compute Nodes in the unusable + state. + :type unusable: int + :param waiting_for_start_task: Required. The number of Compute Nodes in + the waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of Compute Nodes. + :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, *, creating: int, idle: int, offline: int, preempted: int, rebooting: int, reimaging: int, running: int, starting: 
int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, total: int, **kwargs) -> None: + super(NodeCounts, self).__init__(**kwargs) + self.creating = creating + self.idle = idle + self.offline = offline + self.preempted = preempted + self.rebooting = rebooting + self.reimaging = reimaging + self.running = running + self.starting = starting + self.start_task_failed = start_task_failed + self.leaving_pool = leaving_pool + self.unknown = unknown + self.unusable = unusable + self.waiting_for_start_task = waiting_for_start_task + self.total = total diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter.py new file mode 100644 index 00000000..1104c06c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a Compute Node. + + :param node_disable_scheduling_option: What to do with currently running + Tasks when disabling Task scheduling on the Compute Node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, **kwargs): + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = kwargs.get('node_disable_scheduling_option', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter_py3.py new file mode 100644 index 00000000..2e621e48 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_disable_scheduling_parameter_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a Compute Node. + + :param node_disable_scheduling_option: What to do with currently running + Tasks when disabling Task scheduling on the Compute Node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None: + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = node_disable_scheduling_option diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_file.py b/azext/generated/sdk/batch/v2019_06_01/models/node_file.py new file mode 100644 index 00000000..79706922 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_file.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a Compute Node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, **kwargs): + super(NodeFile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.url = kwargs.get('url', None) + self.is_directory = kwargs.get('is_directory', None) + self.properties = kwargs.get('properties', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_file_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/node_file_paged.py new file mode 100644 index 00000000..4463c944 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_file_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeFilePaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeFile ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeFile]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeFilePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_file_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_file_py3.py new file mode 100644 index 00000000..c3608299 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_file_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a Compute Node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None: + super(NodeFile, self).__init__(**kwargs) + self.name = name + self.url = url + self.is_directory = is_directory + self.properties = properties diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter.py new file mode 100644 index 00000000..f9fd7842 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a Compute Node. + + :param node_reboot_option: When to reboot the Compute Node and what to do + with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, **kwargs): + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = kwargs.get('node_reboot_option', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter_py3.py new file mode 100644 index 00000000..b2d5d1dc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_reboot_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a Compute Node. + + :param node_reboot_option: When to reboot the Compute Node and what to do + with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, *, node_reboot_option=None, **kwargs) -> None: + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = node_reboot_option diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter.py new file mode 100644 index 00000000..7cb55458 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a Compute Node. + + :param node_reimage_option: When to reimage the Compute Node and what to + do with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, **kwargs): + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = kwargs.get('node_reimage_option', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter_py3.py new file mode 100644 index 00000000..11aad8cf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_reimage_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a Compute Node. + + :param node_reimage_option: When to reimage the Compute Node and what to + do with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, *, node_reimage_option=None, **kwargs) -> None: + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = node_reimage_option diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter.py new file mode 100644 index 00000000..9082b65c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing Compute Nodes from a Pool. + + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the Compute Nodes + to be removed from the specified Pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of Compute Nodes to the + Pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). 
+ :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) after it has been selected for deallocation. The + default value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = kwargs.get('node_list', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter_py3.py new file mode 100644 index 00000000..5969c5e4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_remove_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing Compute Nodes from a Pool. 
+ + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the Compute Nodes + to be removed from the specified Pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of Compute Nodes to the + Pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) after it has been selected for deallocation. The + default value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = node_list + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter.py new file mode 100644 index 00000000..c9e90ab6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user Account on a Compute Node. + + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. If omitted, any existing + password is removed. + :type password: str + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + If omitted, any existing SSH public key is removed. 
+ :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = kwargs.get('password', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter_py3.py new file mode 100644 index 00000000..9d538529 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/node_update_user_parameter_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user Account on a Compute Node. + + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. If omitted, any existing + password is removed. + :type password: str + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. 
For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + If omitted, any existing SSH public key is removed. + :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None: + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = password + self.expiry_time = expiry_time + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file.py new file mode 100644 index 00000000..ca0d324e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch Compute Node to + another location after the Batch service has finished executing the Task + process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the Task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the Task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, **kwargs): + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = kwargs.get('file_pattern', None) + self.destination = kwargs.get('destination', None) + self.upload_options = kwargs.get('upload_options', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination.py new file mode 100644 index 00000000..ee86a589 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. 
contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. + :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = kwargs.get('path', None) + self.container_url = kwargs.get('container_url', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination_py3.py new file mode 100644 index 00000000..3f0c9ce0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_blob_container_destination_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. 
+ :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, *, container_url: str, path: str=None, **kwargs) -> None: + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = path + self.container_url = container_url diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination.py new file mode 100644 index 00000000..1033743c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, **kwargs): + super(OutputFileDestination, self).__init__(**kwargs) + self.container = kwargs.get('container', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination_py3.py new file mode 100644 index 00000000..e7c652b6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_destination_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, *, container=None, **kwargs) -> None: + super(OutputFileDestination, self).__init__(**kwargs) + self.container = container diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_py3.py new file mode 100644 index 00000000..229e47f8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch Compute Node to + another location after the Batch service has finished executing the Task + process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the Task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. 
Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the Task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None: + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = file_pattern + self.destination = destination + self.upload_options = upload_options diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options.py new file mode 100644 index 00000000..a5170ff6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the Task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, **kwargs): + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = kwargs.get('upload_condition', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options_py3.py new file mode 100644 index 00000000..ed58bdcc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/output_file_upload_options_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the Task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, *, upload_condition, **kwargs) -> None: + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = upload_condition diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options.py new file mode 100644 index 00000000..04d968a8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options_py3.py new file mode 100644 index 00000000..62b3e62b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter.py new file mode 100644 index 00000000..bd5cb743 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A Pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Pool within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two Pool IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of virtual machines in the Pool. All + virtual machines in a Pool are the same size. For information about + available sizes of virtual machines for Cloud Services Pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. For information about available VM sizes for Pools using Images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. 
This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. 
The Task runs when the Compute Node is added to the Pool + or when the Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. 
+ :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': 
'[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) diff --git 
a/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter_py3.py new file mode 100644 index 00000000..78e1285b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_add_parameter_py3.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A Pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Pool within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two Pool IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of virtual machines in the Pool. All + virtual machines in a Pool are the same size. For information about + available sizes of virtual machines for Cloud Services Pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). 
+ Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. For information about available VM sizes for Pools using Images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. 
If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. The Task runs when the Compute Node is added to the Pool + or when the Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, 
target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, **kwargs) -> None: + super(PoolAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options.py new file mode 100644 index 00000000..622241dc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options_py3.py new file mode 100644 index 00000000..7ca41443 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options.py new file mode 100644 index 00000000..96b0bc7c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options_py3.py new file mode 100644 index 00000000..4a069bd0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_disable_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options.py new file mode 100644 index 00000000..dd77582f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options_py3.py new file mode 100644 index 00000000..507bd702 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter.py new file mode 100644 index 00000000..bf94fb46 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a Pool. + + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. The formula is checked for validity before it is + applied to the Pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale Compute Nodes in an + Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. 
+ :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter_py3.py new file mode 100644 index 00000000..1e55da37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_enable_auto_scale_parameter_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a Pool. + + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. The formula is checked for validity before it is + applied to the Pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale Compute Nodes in an + Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
+ :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. + :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None: + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration.py new file mode 100644 index 00000000..0f0db164 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT Pools that can + be used to address specific ports on an individual Compute Node + externally. The maximum number of inbound NAT Pools per Batch Pool is 5. + If the maximum number of inbound NAT Pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, **kwargs): + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration_py3.py new file mode 100644 index 00000000..90e7238b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_endpoint_configuration_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT Pools that can + be used to address specific ports on an individual Compute Node + externally. The maximum number of inbound NAT Pools per Batch Pool is 5. + If the maximum number of inbound NAT Pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, *, inbound_nat_pools, **kwargs) -> None: + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = inbound_nat_pools diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options.py new file mode 100644 index 00000000..5fbb7ad3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options_py3.py new file mode 100644 index 00000000..a2f09b9d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter.py new file mode 100644 index 00000000..123ada7b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a Pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to the + Pool, 'Enable automatic scaling on a Pool'. 
For more information about + specifying this formula, see Automatically scale Compute Nodes in an Azure + Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter_py3.py new file mode 100644 index 00000000..ced34d08 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_evaluate_auto_scale_parameter_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a Pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to the + Pool, 'Enable automatic scaling on a Pool'. 
For more information about + specifying this formula, see Automatically scale Compute Nodes in an Azure + Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, *, auto_scale_formula: str, **kwargs) -> None: + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options.py new file mode 100644 index 00000000..feffd1c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. 
Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options_py3.py new file mode 100644 index 00000000..de152edb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..dbbbcf45 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..0fc18020 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options.py new file mode 100644 index 00000000..a629c21e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options_py3.py new file mode 100644 index 00000000..c0b04bd5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_information.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_information.py new file mode 100644 index 00000000..5543b8da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a Job should be assigned to a Pool. + + :param pool_id: The ID of an existing Pool. All the Tasks of the Job will + run on the specified Pool. You must ensure that the Pool referenced by + this property exists. If the Pool does not exist at the time the Batch + service tries to schedule a Job, no Tasks for the Job will run until you + create a Pool with that id. Note that the Batch service will not reject + the Job request; it will simply not run Tasks until the Pool exists. You + must specify either the Pool ID or the auto Pool specification, but not + both. + :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto Pool when the Job is + submitted. If auto Pool creation fails, the Batch service moves the Job to + a completed state, and the Pool creation error is set in the Job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto Pool. + Any user actions that affect the lifetime of the auto Pool while the Job + is active will result in unexpected behavior. You must specify either the + Pool ID or the auto Pool specification, but not both. 
+ :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, **kwargs): + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.auto_pool_specification = kwargs.get('auto_pool_specification', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_information_py3.py new file mode 100644 index 00000000..6074ea76 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a Job should be assigned to a Pool. + + :param pool_id: The ID of an existing Pool. All the Tasks of the Job will + run on the specified Pool. You must ensure that the Pool referenced by + this property exists. If the Pool does not exist at the time the Batch + service tries to schedule a Job, no Tasks for the Job will run until you + create a Pool with that id. Note that the Batch service will not reject + the Job request; it will simply not run Tasks until the Pool exists. You + must specify either the Pool ID or the auto Pool specification, but not + both. 
+ :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto Pool when the Job is + submitted. If auto Pool creation fails, the Batch service moves the Job to + a completed state, and the Pool creation error is set in the Job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto Pool. + Any user actions that affect the lifetime of the auto Pool while the Job + is active will result in unexpected behavior. You must specify either the + Pool ID or the auto Pool specification, but not both. + :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, *, pool_id: str=None, auto_pool_specification=None, **kwargs) -> None: + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.auto_pool_specification = auto_pool_specification diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options.py new file mode 100644 index 00000000..add4b622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options_py3.py new file mode 100644 index 00000000..d27ea2a6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options.py new file mode 100644 index 00000000..5b52f71a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. + + :param start_time: The earliest time from which to include metrics. 
This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options_py3.py new file mode 100644 index 00000000..2141cfa5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_list_usage_metrics_options_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. 
+ + :param start_time: The earliest time from which to include metrics. This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts.py new file mode 100644 index 00000000..cb2374fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of Compute Nodes in each state for a Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param pool_id: Required. The ID of the Pool. + :type pool_id: str + :param dedicated: The number of dedicated Compute Nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority Compute Nodes in each + state. + :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, **kwargs): + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.dedicated = kwargs.get('dedicated', None) + self.low_priority = kwargs.get('low_priority', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_paged.py new file mode 100644 index 00000000..67159e5d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolNodeCountsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolNodeCounts ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolNodeCounts]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolNodeCountsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_py3.py new file mode 100644 index 00000000..169ff57c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_node_counts_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of Compute Nodes in each state for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool. + :type pool_id: str + :param dedicated: The number of dedicated Compute Nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority Compute Nodes in each + state. 
+ :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None: + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = pool_id + self.dedicated = dedicated + self.low_priority = low_priority diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options.py new file mode 100644 index 00000000..82b54aef --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options_py3.py new file mode 100644 index 00000000..ff9f10f0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter.py new file mode 100644 index 00000000..d2315893 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a Pool. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing start Task. If omitted, any existing start Task is left + unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. If this element is present, it replaces any + existing Certificate references configured on the Pool. If omitted, any + existing Certificate references are left unchanged. For Windows Nodes, the + Batch service installs the Certificates to the specified Certificate store + and location. For Linux Compute Nodes, the Certificates are stored in a + directory inside the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of Packages to be installed + on each Compute Node in the Pool. Changes to Package references affect all + new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. If this element + is present, it replaces any existing Package references. If you specify an + empty collection, then all Package references are removed from the Pool. + If omitted, any existing Package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the Pool. If you specify an empty collection, any metadata + is removed from the Pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter_py3.py new file mode 100644 index 00000000..c81074ea --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_patch_parameter_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a Pool. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing start Task. If omitted, any existing start Task is left + unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. If this element is present, it replaces any + existing Certificate references configured on the Pool. If omitted, any + existing Certificate references are left unchanged. For Windows Nodes, the + Batch service installs the Certificates to the specified Certificate store + and location. For Linux Compute Nodes, the Certificates are stored in a + directory inside the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of Packages to be installed + on each Compute Node in the Pool. Changes to Package references affect all + new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. If this element + is present, it replaces any existing Package references. If you specify an + empty collection, then all Package references are removed from the Pool. + If omitted, any existing Package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the Pool. If you specify an empty collection, any metadata + is removed from the Pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, **kwargs) -> None: + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options.py new file mode 100644 index 00000000..14be8ddd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options_py3.py new file mode 100644 index 00000000..1fe5eb97 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_remove_nodes_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options.py new file mode 100644 index 00000000..e83a7ccc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options_py3.py new file mode 100644 index 00000000..ef457e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter.py new file mode 100644 index 00000000..9d4a258d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a Pool. 
+ + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of Nodes to the Pool or + removal of Compute Nodes from the Pool. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) if the Pool size is decreasing. The default value + is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter_py3.py new file mode 100644 index 00000000..ebacfd8f --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/pool_resize_parameter_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a Pool. + + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of Nodes to the Pool or + removal of Compute Nodes from the Pool. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) if the Pool size is decreasing. The default value + is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_specification.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_specification.py new file mode 100644 index 00000000..ad92bb65 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_specification.py @@ -0,0 +1,189 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the Pool. + All virtual machines in a Pool are the same size. For information about + available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property must be specified if the Pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property must be specified if the Pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. 
+ :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. 
The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. 
For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. The permitted licenses available on the Pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the Pool. + :type application_licenses: list[str] + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = 
kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_specification_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_specification_py3.py new file mode 100644 index 00000000..008c499e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_specification_py3.py @@ -0,0 +1,189 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new Pool. + + All required parameters must be populated in order to send to Azure. + + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the Pool. + All virtual machines in a Pool are the same size. For information about + available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property must be specified if the Pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property must be specified if the Pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicateNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The Pool automatically resizes according to the + formula. 
The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. 
For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. The permitted licenses available on the Pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the Pool. + :type application_licenses: list[str] + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, 
target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, **kwargs) -> None: + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.user_accounts = user_accounts + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics.py new file mode 100644 index 00000000..2b2eb837 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + Pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to Pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + Compute Nodes in the Pool. 
+ :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, **kwargs): + super(PoolStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.usage_stats = kwargs.get('usage_stats', None) + self.resource_stats = kwargs.get('resource_stats', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics_py3.py new file mode 100644 index 00000000..582b013f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_statistics_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + Pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. 
+ :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to Pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + Compute Nodes in the Pool. + :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None: + super(PoolStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.usage_stats = usage_stats + self.resource_stats = resource_stats diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options.py new file mode 100644 index 00000000..ab8fec73 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options_py3.py new file mode 100644 index 00000000..d5cc404e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_stop_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options.py new file mode 100644 index 00000000..ca7f97cb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options_py3.py new file mode 100644 index 00000000..edf5065c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter.py new file mode 100644 index 00000000..a3705f0b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing start Task. If omitted, any existing start Task is removed from + the Pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of Certificates to be + installed on each Compute Node in the Pool. This list replaces any + existing Certificate references configured on the Pool. If you specify an + empty collection, any existing Certificate references are removed from the + Pool. For Windows Nodes, the Batch service installs the Certificates to + the specified Certificate store and location. For Linux Compute Nodes, the + Certificates are stored in a directory inside the Task working directory + and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the + Task to query for this location. For Certificates with visibility of + 'remoteUser', a 'certs' directory is created in the user's home directory + (e.g., /home/{user-name}/certs) and Certificates are placed in that + directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of Application + Packages to be installed on each Compute Node in the Pool. The list + replaces any existing Application Package references on the Pool. Changes + to Application Package references affect all new Compute Nodes joining the + Pool, but do not affect Compute Nodes that are already in the Pool until + they are rebooted or reimaged. There is a maximum of 10 Application + Package references on any given Pool. If omitted, or if you specify an + empty collection, any existing Application Packages references are removed + from the Pool. A maximum of 10 references may be specified on a given + Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + Pool as metadata. This list replaces any existing metadata configured on + the Pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the Pool. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter_py3.py new file mode 100644 index 00000000..5d061050 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_update_properties_parameter_py3.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing start Task. If omitted, any existing start Task is removed from + the Pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of Certificates to be + installed on each Compute Node in the Pool. This list replaces any + existing Certificate references configured on the Pool. If you specify an + empty collection, any existing Certificate references are removed from the + Pool. For Windows Nodes, the Batch service installs the Certificates to + the specified Certificate store and location. For Linux Compute Nodes, the + Certificates are stored in a directory inside the Task working directory + and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the + Task to query for this location. For Certificates with visibility of + 'remoteUser', a 'certs' directory is created in the user's home directory + (e.g., /home/{user-name}/certs) and Certificates are placed in that + directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of Application + Packages to be installed on each Compute Node in the Pool. The list + replaces any existing Application Package references on the Pool. Changes + to Application Package references affect all new Compute Nodes joining the + Pool, but do not affect Compute Nodes that are already in the Pool until + they are rebooted or reimaged. There is a maximum of 10 Application + Package references on any given Pool. If omitted, or if you specify an + empty collection, any existing Application Packages references are removed + from the Pool. A maximum of 10 references may be specified on a given + Pool. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + Pool as metadata. This list replaces any existing metadata configured on + the Pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the Pool. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, **kwargs) -> None: + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics.py new file mode 100644 index 00000000..c8ae169e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a Pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the Pool. All + VMs in a Pool are the same size. For information about available sizes of + virtual machines in Pools, see Choose a VM size for Compute Nodes in an + Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the Pool + during this aggregation interval. 
+ :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_core_hours = kwargs.get('total_core_hours', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_paged.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_paged.py new file mode 100644 index 00000000..891554f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolUsageMetricsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolUsageMetrics ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolUsageMetrics]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolUsageMetricsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_py3.py new file mode 100644 index 00000000..c0842745 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/pool_usage_metrics_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a Pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the Pool. All + VMs in a Pool are the same size. 
For information about available sizes of + virtual machines in Pools, see Choose a VM size for Compute Nodes in an + Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the Pool + during this aggregation interval. + :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None: + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = pool_id + self.start_time = start_time + self.end_time = end_time + self.vm_size = vm_size + self.total_core_hours = total_core_hours diff --git a/azext/generated/sdk/batch/v2019_06_01/models/recent_job.py b/azext/generated/sdk/batch/v2019_06_01/models/recent_job.py new file mode 100644 index 00000000..9aacfff0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/recent_job.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent Job to run under the Job Schedule. + + :param id: The ID of the Job. + :type id: str + :param url: The URL of the Job. + :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(RecentJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/recent_job_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/recent_job_py3.py new file mode 100644 index 00000000..95286729 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/recent_job_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent Job to run under the Job Schedule. + + :param id: The ID of the Job. + :type id: str + :param url: The URL of the Job. 
+ :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None: + super(RecentJob, self).__init__(**kwargs) + self.id = id + self.url = url diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resize_error.py b/azext/generated/sdk/batch/v2019_06_01/models/resize_error.py new file mode 100644 index 00000000..ba9061f6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resize_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a Pool. + + :param code: An identifier for the Pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the Pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ResizeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resize_error_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/resize_error_py3.py new file mode 100644 index 00000000..1f7295df --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resize_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a Pool. + + :param code: An identifier for the Pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the Pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(ResizeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resource_file.py b/azext/generated/sdk/batch/v2019_06_01/models/resource_file.py new file mode 100644 index 00000000..bb221d36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resource_file.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a Compute Node. + + :param auto_storage_container_name: The storage container name in the auto + storage Account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. 
This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the Compute Node to which to download + the file(s), relative to the Task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. 
In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the Task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux Compute Nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows Compute Node. If this property is not specified + for a Linux Compute Node, then a default value of 0770 is applied to the + file. + :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) + self.storage_container_url = kwargs.get('storage_container_url', None) + self.http_url = kwargs.get('http_url', None) + self.blob_prefix = kwargs.get('blob_prefix', None) + self.file_path = kwargs.get('file_path', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resource_file_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/resource_file_py3.py new file mode 100644 index 00000000..29d3d7b1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resource_file_py3.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a Compute Node. + + :param auto_storage_container_name: The storage container name in the auto + storage Account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. 
There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the Compute Node to which to download + the file(s), relative to the Task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the Task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux Compute Nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows Compute Node. If this property is not specified + for a Linux Compute Node, then a default value of 0770 is applied to the + file. 
+ :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None: + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = auto_storage_container_name + self.storage_container_url = storage_container_url + self.http_url = http_url + self.blob_prefix = blob_prefix + self.file_path = file_path + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics.py new file mode 100644 index 00000000..7861657e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. 
The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + Compute Nodes in the Pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all Compute Nodes in the Pool. + :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + Compute Nodes in the Pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all Compute Nodes in the Pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all Compute Nodes in the Pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all Compute Nodes in the Pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all Compute Nodes in the Pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all Compute Nodes in the Pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all Compute Nodes in the Pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all Compute Nodes in the Pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all Compute Nodes in the Pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.avg_cpu_percentage = kwargs.get('avg_cpu_percentage', None) + self.avg_memory_gi_b = kwargs.get('avg_memory_gi_b', None) + self.peak_memory_gi_b = kwargs.get('peak_memory_gi_b', None) + self.avg_disk_gi_b = kwargs.get('avg_disk_gi_b', None) + self.peak_disk_gi_b = kwargs.get('peak_disk_gi_b', None) + self.disk_read_iops = 
kwargs.get('disk_read_iops', None) + self.disk_write_iops = kwargs.get('disk_write_iops', None) + self.disk_read_gi_b = kwargs.get('disk_read_gi_b', None) + self.disk_write_gi_b = kwargs.get('disk_write_gi_b', None) + self.network_read_gi_b = kwargs.get('network_read_gi_b', None) + self.network_write_gi_b = kwargs.get('network_write_gi_b', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics_py3.py new file mode 100644 index 00000000..85289d9c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/resource_statistics_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + Compute Nodes in the Pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all Compute Nodes in the Pool. 
+ :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + Compute Nodes in the Pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all Compute Nodes in the Pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all Compute Nodes in the Pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all Compute Nodes in the Pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all Compute Nodes in the Pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all Compute Nodes in the Pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all Compute Nodes in the Pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all Compute Nodes in the Pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all Compute Nodes in the Pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None: + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.avg_cpu_percentage = avg_cpu_percentage + self.avg_memory_gi_b = 
avg_memory_gi_b + self.peak_memory_gi_b = peak_memory_gi_b + self.avg_disk_gi_b = avg_disk_gi_b + self.peak_disk_gi_b = peak_disk_gi_b + self.disk_read_iops = disk_read_iops + self.disk_write_iops = disk_write_iops + self.disk_read_gi_b = disk_read_gi_b + self.disk_write_gi_b = disk_write_gi_b + self.network_read_gi_b = network_read_gi_b + self.network_write_gi_b = network_write_gi_b diff --git a/azext/generated/sdk/batch/v2019_06_01/models/schedule.py b/azext/generated/sdk/batch/v2019_06_01/models/schedule.py new file mode 100644 index 00000000..6faff7c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/schedule.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which Jobs will be created. + + :param do_not_run_until: The earliest time at which any Job may be created + under this Job Schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create Jobs immediately. + :type do_not_run_until: datetime + :param do_not_run_after: A time after which no Job will be created under + this Job Schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active Job under this Job + Schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring Job Schedule, the Job Schedule will remain active until you + explicitly terminate it. 
+ :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a Job should be created, within which a Job must be + created. If a Job is not created within the startWindow interval, then the + 'opportunity' is lost; no Job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the Job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive Jobs under the Job Schedule. A Job Schedule can have at + most one active Job under it at any given time. Because a Job Schedule can + have at most one active Job under it at any given time, if it is time to + create a new Job under a Job Schedule, but the previous Job is still + running, the Batch service will not create the new Job until the previous + Job finishes. If the previous Job does not finish within the startWindow + period of the new recurrenceInterval, then no new Job will be scheduled + for that interval. For recurring Jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when Jobs are created, add + Tasks to the Jobs and terminate the Jobs ready for the next recurrence. + The default is that the schedule does not recur: one Job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that Job finishes. The minimum value is 1 minute. 
If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = kwargs.get('do_not_run_until', None) + self.do_not_run_after = kwargs.get('do_not_run_after', None) + self.start_window = kwargs.get('start_window', None) + self.recurrence_interval = kwargs.get('recurrence_interval', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/schedule_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/schedule_py3.py new file mode 100644 index 00000000..34189c09 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/schedule_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which Jobs will be created. + + :param do_not_run_until: The earliest time at which any Job may be created + under this Job Schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create Jobs immediately. 
+ :type do_not_run_until: datetime + :param do_not_run_after: A time after which no Job will be created under + this Job Schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active Job under this Job + Schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring Job Schedule, the Job Schedule will remain active until you + explicitly terminate it. + :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a Job should be created, within which a Job must be + created. If a Job is not created within the startWindow interval, then the + 'opportunity' is lost; no Job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the Job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive Jobs under the Job Schedule. A Job Schedule can have at + most one active Job under it at any given time. Because a Job Schedule can + have at most one active Job under it at any given time, if it is time to + create a new Job under a Job Schedule, but the previous Job is still + running, the Batch service will not create the new Job until the previous + Job finishes. If the previous Job does not finish within the startWindow + period of the new recurrenceInterval, then no new Job will be scheduled + for that interval. 
For recurring Jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when Jobs are created, add + Tasks to the Jobs and terminate the Jobs ready for the next recurrence. + The default is that the schedule does not recur: one Job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that Job finishes. The minimum value is 1 minute. If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None: + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = do_not_run_until + self.do_not_run_after = do_not_run_after + self.start_window = start_window + self.recurrence_interval = recurrence_interval diff --git a/azext/generated/sdk/batch/v2019_06_01/models/start_task.py b/azext/generated/sdk/batch/v2019_06_01/models/start_task.py new file mode 100644 index 00000000..fcd94a77 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/start_task.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A Task which is run when a Node joins a Pool in the Azure Batch service, or + when the Compute Node is rebooted or reimaged. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. In some cases the start Task may be re-run even though the + Compute Node was not rebooted. Special care should be taken to avoid start + Tasks which create breakaway process or install/launch services from the + start Task working directory, as this will block Batch from being able to + re-run the start Task. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start Task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start Task runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all Task environment variables are + mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start Task runs. + If omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the Task may be + retried. 
The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the Task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the Task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the Task. If the maximum retry count is + -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start Task to complete successfully (that is, to exit with exit code 0) + before scheduling any Tasks on the Compute Node. If true and the start + Task fails on a Node, the Batch service retries the start Task up to its + maximum retry count (maxTaskRetryCount). If the Task has still not + completed successfully after all retries, then the Batch service marks the + Node unusable, and will not schedule Tasks to it. This condition can be + detected via the Compute Node state and failure info details. If false, + the Batch service will not wait for the start Task to complete. In this + case, other Tasks can start executing on the Compute Node while the start + Task is still running; and even if the start Task fails, new Tasks will + continue to be scheduled on the Compute Node. The default is false. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(StartTask, self).__init__(**kwargs) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.user_identity = kwargs.get('user_identity', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) + self.wait_for_success = kwargs.get('wait_for_success', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/start_task_information.py b/azext/generated/sdk/batch/v2019_06_01/models/start_task_information.py new file mode 100644 index 00000000..0d3166e1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/start_task_information.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start Task running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start Task on the Compute Node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start Task started + running. This value is reset every time the Task is restarted or retried + (that is, this is the most recent time at which the start Task started + running). + :type start_time: datetime + :param end_time: The time at which the start Task stopped running. This is + the end time of the most recent run of the start Task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start Task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start Task + command line. This property is set only if the start Task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start Task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. 
This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(StartTaskInformation, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/start_task_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/start_task_information_py3.py new file mode 100644 index 00000000..b632f82d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/start_task_information_py3.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a start Task running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the start Task on the Compute Node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the start Task started + running. This value is reset every time the Task is restarted or retried + (that is, this is the most recent time at which the start Task started + running). + :type start_time: datetime + :param end_time: The time at which the start Task stopped running. This is + the end time of the most recent run of the start Task, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the start Task is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the start Task + command line. This property is set only if the start Task is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the start Task (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, state, start_time, retry_count: int, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(StartTaskInformation, self).__init__(**kwargs) + self.state = state + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_06_01/models/start_task_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/start_task_py3.py new file mode 100644 index 00000000..57cccd28 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/start_task_py3.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A Task which is run when a Node joins a Pool in the Azure Batch service, or + when the Compute Node is rebooted or reimaged. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. In some cases the start Task may be re-run even though the + Compute Node was not rebooted. Special care should be taken to avoid start + Tasks which create breakaway process or install/launch services from the + start Task working directory, as this will block Batch from being able to + re-run the start Task. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the start Task. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + start Task runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all Task environment variables are + mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start Task runs. + If omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the Task may be + retried. 
The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the Task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the Task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the Task. If the maximum retry count is + -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start Task to complete successfully (that is, to exit with exit code 0) + before scheduling any Tasks on the Compute Node. If true and the start + Task fails on a Node, the Batch service retries the start Task up to its + maximum retry count (maxTaskRetryCount). If the Task has still not + completed successfully after all retries, then the Batch service marks the + Node unusable, and will not schedule Tasks to it. This condition can be + detected via the Compute Node state and failure info details. If false, + the Batch service will not wait for the start Task to complete. In this + case, other Tasks can start executing on the Compute Node while the start + Task is still running; and even if the start Task fails, new Tasks will + continue to be scheduled on the Compute Node. The default is false. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None: + super(StartTask, self).__init__(**kwargs) + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.user_identity = user_identity + self.max_task_retry_count = max_task_retry_count + self.wait_for_success = wait_for_success diff --git a/azext/generated/sdk/batch/v2019_06_01/models/subtask_information.py b/azext/generated/sdk/batch/v2019_06_01/models/subtask_information.py new file mode 100644 index 00000000..3af9e1d4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/subtask_information.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the Compute Node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. 
Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. + :type previous_state_transition_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(SubtaskInformation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.node_info = kwargs.get('node_info', 
None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/subtask_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/subtask_information_py3.py new file mode 100644 index 00000000..a62d27fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/subtask_information_py3.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the Compute Node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. 
This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. 
+ :type previous_state_transition_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, id: int=None, node_info=None, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, result=None, **kwargs) -> None: + super(SubtaskInformation, self).__init__(**kwargs) + self.id = id + self.node_info = node_info + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options.py 
b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options.py new file mode 100644 index 00000000..f0622c9c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options_py3.py new file mode 100644 index 00000000..634f522c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter.py new file mode 100644 index 00000000..be387bb5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch Tasks to add. 
+ + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of Tasks to add. The maximum count + of Tasks is 100. The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each Task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter_py3.py new file mode 100644 index 00000000..ed3330e2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_parameter_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch Tasks to add. + + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of Tasks to add. The maximum count + of Tasks is 100. 
The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each Task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, *, value, **kwargs) -> None: + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result.py new file mode 100644 index 00000000..caffae23 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of Tasks to a Job. + + :param value: The results of the add Task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result_py3.py new file mode 100644 index 00000000..f64f9f53 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_collection_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of Tasks to a Job. + + :param value: The results of the add Task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_options.py new file mode 100644 index 00000000..667cc19d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_options_py3.py new file mode 100644 index 00000000..da9c6a8c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter.py new file mode 100644 index 00000000..0bad1f29 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch Task to add. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. 
Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Task within the + Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a Job that differ only by case). + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the Task. For + multi-instance Tasks, the command line is executed as the primary Task, + after the primary Task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
+ :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. 
+ :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. If the Job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. 
Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, **kwargs): + super(TaskAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.exit_conditions = kwargs.get('exit_conditions', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.affinity_info = kwargs.get('affinity_info', None) + self.constraints = 
kwargs.get('constraints', None) + self.user_identity = kwargs.get('user_identity', None) + self.multi_instance_settings = kwargs.get('multi_instance_settings', None) + self.depends_on = kwargs.get('depends_on', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter_py3.py new file mode 100644 index 00000000..15121d18 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_parameter_py3.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch Task to add. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. 
+ + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Task within the + Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a Job that differ only by case). + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the Task. For + multi-instance Tasks, the command line is executed as the primary Task, + after the primary Task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. 
When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. 
+ :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. If the Job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, 
+ 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(TaskAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.exit_conditions = exit_conditions + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.multi_instance_settings = multi_instance_settings + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_result.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_result.py new file mode 100644 index 00000000..fc4261e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_result.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single Task added as part of an add Task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add Task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the Task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the Task, if the Task was successfully added. + You can use this to detect whether the Task has changed between requests. + In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the Job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param location: The URL of the Task, if the Task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the Task.
+ :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, **kwargs): + super(TaskAddResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.task_id = kwargs.get('task_id', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.location = kwargs.get('location', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_add_result_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_add_result_py3.py new file mode 100644 index 00000000..6a26f36f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_add_result_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single Task added as part of an add Task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add Task request. 
Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the Task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the Task, if the Task was successfully added. + You can use this to detect whether the Task has changed between requests. + In particular, you can pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the Job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param location: The URL of the Task, if the Task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the Task. + :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None: + super(TaskAddResult, self).__init__(**kwargs) + self.status = status + self.task_id = task_id + self.e_tag = e_tag + self.last_modified = last_modified + self.location = location + self.error = error diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_constraints.py b/azext/generated/sdk/batch/v2019_06_01/models/task_constraints.py new file mode 100644 index 00000000..8fea5f57 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_constraints.py @@ -0,0 +1,51 @@ +# coding=utf-8 +#
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a Task. + + :param max_wall_clock_time: The maximum elapsed time that the Task may + run, measured from the time the Task starts. If the Task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the Task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory on + the Compute Node where it ran, from the time it completes execution. After + this time, the Batch service may delete the Task directory and all its + contents. The default is 7 days, i.e. the Task directory will be retained + for 7 days unless the Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + Task executable due to a nonzero exit code. The Batch service will try the + Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the Task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the Task after the first attempt. If the maximum retry + count is -1, the Batch service retries the Task without limit. 
+ :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_constraints_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_constraints_py3.py new file mode 100644 index 00000000..70027cf0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_constraints_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a Task. + + :param max_wall_clock_time: The maximum elapsed time that the Task may + run, measured from the time the Task starts. If the Task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the Task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory on + the Compute Node where it ran, from the time it completes execution. 
After + this time, the Batch service may delete the Task directory and all its + contents. The default is 7 days, i.e. the Task directory will be retained + for 7 days unless the Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + Task executable due to a nonzero exit code. The Batch service will try the + Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the Task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the Task after the first attempt. If the maximum retry + count is -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information.py new file mode 100644 index 00000000..153ba043 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a Task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = kwargs.get('container_id', None) + self.state = kwargs.get('state', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information_py3.py new file mode 100644 index 00000000..04d96708 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_container_execution_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a Task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, *, container_id: str=None, state: str=None, error: str=None, **kwargs) -> None: + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = container_id + self.state = state + self.error = error diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings.py b/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings.py new file mode 100644 index 00000000..210829b2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a Task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The Image to use to create the container in + which the Task will run. This is the full Image reference, as would be + specified to "docker pull". If no tag is provided as part of the Image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container Image. + This setting can be omitted if it was already provided at Pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + :param working_directory: The location of the container Task working + directory. The default is 'taskWorkingDirectory'.
Possible values include: + 'taskWorkingDirectory', 'containerImageDefault' + :type working_directory: str or + ~azure.batch.models.ContainerWorkingDirectory + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, + } + + def __init__(self, **kwargs): + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = kwargs.get('container_run_options', None) + self.image_name = kwargs.get('image_name', None) + self.registry = kwargs.get('registry', None) + self.working_directory = kwargs.get('working_directory', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings_py3.py new file mode 100644 index 00000000..cf8fbca1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_container_settings_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a Task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. 
These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The Image to use to create the container in + which the Task will run. This is the full Image reference, as would be + specified to "docker pull". If no tag is provided as part of the Image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container Image. + This setting can be omitted if it was already provided at Pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + :param working_directory: The location of the container Task working + directory. The default is 'taskWorkingDirectory'. Possible values include: + 'taskWorkingDirectory', 'containerImageDefault' + :type working_directory: str or + ~azure.batch.models.ContainerWorkingDirectory + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, + } + + def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, working_directory=None, **kwargs) -> None: + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = container_run_options + self.image_name = image_name + self.registry = registry + self.working_directory = working_directory diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_counts.py b/azext/generated/sdk/batch/v2019_06_01/models/task_counts.py new file mode 100644 index 00000000..7f40ffef --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_counts.py @@ -0,0 +1,57 @@ +# coding=utf-8 +#
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The Task counts for a Job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of Tasks in the active state. + :type active: int + :param running: Required. The number of Tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of Tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of Tasks which succeeded. A Task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of Tasks which failed. A Task fails if + its result (found in the executionInfo property) is 'failure'. 
+ :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskCounts, self).__init__(**kwargs) + self.active = kwargs.get('active', None) + self.running = kwargs.get('running', None) + self.completed = kwargs.get('completed', None) + self.succeeded = kwargs.get('succeeded', None) + self.failed = kwargs.get('failed', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_counts_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_counts_py3.py new file mode 100644 index 00000000..55019794 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_counts_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The Task counts for a Job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of Tasks in the active state. + :type active: int + :param running: Required. The number of Tasks in the running or preparing + state. + :type running: int + :param completed: Required. 
The number of Tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of Tasks which succeeded. A Task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of Tasks which failed. A Task fails if + its result (found in the executionInfo property) is 'failure'. + :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: + super(TaskCounts, self).__init__(**kwargs) + self.active = active + self.running = running + self.completed = completed + self.succeeded = succeeded + self.failed = failed diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options.py new file mode 100644 index 00000000..2daf7608 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options_py3.py new file mode 100644 index 00000000..4b836c65 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies.py b/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies.py new file mode 100644 index 00000000..14f17278 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a Task. 
Any Task that is explicitly specified + or within a dependency range must complete before the dependant Task will + be scheduled. + + :param task_ids: The list of Task IDs that this Task depends on. All Tasks + in this list must complete successfully before the dependent Task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all Task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of Task ID ranges that this Task depends + on. All Tasks in all ranges must complete successfully before the + dependent Task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, **kwargs): + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = kwargs.get('task_ids', None) + self.task_id_ranges = kwargs.get('task_id_ranges', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies_py3.py new file mode 100644 index 00000000..b739ef14 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_dependencies_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a Task. Any Task that is explicitly specified + or within a dependency range must complete before the dependant Task will + be scheduled. + + :param task_ids: The list of Task IDs that this Task depends on. All Tasks + in this list must complete successfully before the dependent Task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all Task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of Task ID ranges that this Task depends + on. All Tasks in all ranges must complete successfully before the + dependent Task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None: + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = task_ids + self.task_id_ranges = task_id_ranges diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information.py b/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information.py new file mode 100644 index 00000000..7d35208a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a Task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the Task started running. 'Running' + corresponds to the running state, so if the Task specifies resource files + or Packages, then the start time reflects the time at which the Task + started downloading or deploying these. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. + This property is present only for Tasks that are in the running or + completed state. + :type start_time: datetime + :param end_time: The time at which the Task completed. This property is + set only if the Task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the Task + command line. This property is set only if the Task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the Task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the Task has been + requeued by the Batch service as the result of a user request. When the + user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or + when the Job is being disabled, the user can specify that running Tasks on + the Compute Nodes be requeued for execution. This count tracks how many + times the Task has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the Task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the Task execution. 
If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.requeue_count = kwargs.get('requeue_count', None) + self.last_requeue_time = kwargs.get('last_requeue_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information_py3.py new file mode 100644 index 00000000..e36b66b4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_execution_information_py3.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a Task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the Task started running. 'Running' + corresponds to the running state, so if the Task specifies resource files + or Packages, then the start time reflects the time at which the Task + started downloading or deploying these. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. + This property is present only for Tasks that are in the running or + completed state. + :type start_time: datetime + :param end_time: The time at which the Task completed. This property is + set only if the Task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the Task + command line. This property is set only if the Task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the Task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. 
+ :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the Task has been + requeued by the Batch service as the result of a user request. When the + user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or + when the Job is being disabled, the user can specify that running Tasks on + the Compute Nodes be requeued for execution. This count tracks how many + times the Task has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the Task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. 
+ :type last_requeue_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, retry_count: int, requeue_count: int, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, last_requeue_time=None, result=None, **kwargs) -> None: + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.requeue_count = requeue_count + self.last_requeue_time = last_requeue_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information.py b/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information.py new file mode 100644 index 00000000..dd8ee9e2 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a Task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Task error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the Task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information_py3.py new file mode 100644 index 00000000..f5156975 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_failure_information_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a Task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Task error. Codes are invariant and are + intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the Task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_get_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_get_options.py new file mode 100644 index 00000000..08c1fd8a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_get_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_get_options_py3.py new file mode 100644 index 00000000..68699028 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_id_range.py b/azext/generated/sdk/batch/v2019_06_01/models/task_id_range.py new file mode 100644 index 00000000..ffe1ce7a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_id_range.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of Task IDs that a Task can depend on. All Tasks with IDs in the + range must complete successfully before the dependent Task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first Task ID in the range. + :type start: int + :param end: Required. The last Task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskIdRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_id_range_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_id_range_py3.py new file mode 100644 index 00000000..a4646459 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_id_range_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of Task IDs that a Task can depend on. All Tasks with IDs in the + range must complete successfully before the dependent Task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first Task ID in the range. + :type start: int + :param end: Required. The last Task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(TaskIdRange, self).__init__(**kwargs) + self.start = start + self.end = end diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_information.py b/azext/generated/sdk/batch/v2019_06_01/models/task_information.py new file mode 100644 index 00000000..87f4d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_information.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a Task running on a Compute Node. 
+ + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the Task. + :type task_url: str + :param job_id: The ID of the Job to which the Task belongs. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param subtask_id: The ID of the subtask if the Task is a multi-instance + Task. + :type subtask_id: int + :param task_state: Required. The current state of the Task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the Task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(TaskInformation, self).__init__(**kwargs) + self.task_url = kwargs.get('task_url', None) + self.job_id = kwargs.get('job_id', None) + self.task_id = kwargs.get('task_id', None) + self.subtask_id = kwargs.get('subtask_id', None) + self.task_state = kwargs.get('task_state', None) + self.execution_info = kwargs.get('execution_info', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_information_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_information_py3.py new file mode 100644 index 00000000..982abfe5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_information_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a Task running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the Task. + :type task_url: str + :param job_id: The ID of the Job to which the Task belongs. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param subtask_id: The ID of the subtask if the Task is a multi-instance + Task. + :type subtask_id: int + :param task_state: Required. The current state of the Task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the Task. 
+ :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, *, task_state, task_url: str=None, job_id: str=None, task_id: str=None, subtask_id: int=None, execution_info=None, **kwargs) -> None: + super(TaskInformation, self).__init__(**kwargs) + self.task_url = task_url + self.job_id = job_id + self.task_id = task_id + self.subtask_id = subtask_id + self.task_state = task_state + self.execution_info = execution_info diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_list_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_list_options.py new file mode 100644 index 00000000..e666a462 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_list_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_list_options_py3.py new file mode 100644 index 00000000..0c129bf3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options.py new file mode 100644 index 00000000..8157cee2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. 
+ :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options_py3.py new file mode 100644 index 00000000..b8810800 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_list_subtasks_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options.py new file mode 100644 index 00000000..fe074611 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options_py3.py new file mode 100644 index 00000000..bd39d6c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_reactivate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy.py b/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy.py new file mode 100644 index 00000000..dc8d6dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how Tasks should be distributed across Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. Possible values + include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, **kwargs): + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = kwargs.get('node_fill_type', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy_py3.py new file mode 100644 index 00000000..61a47621 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_scheduling_policy_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how Tasks should be distributed across Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. 
How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. Possible values + include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, *, node_fill_type, **kwargs) -> None: + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = node_fill_type diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/task_statistics.py new file mode 100644 index 00000000..2d2f3169 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_statistics.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a Task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. 
The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the Task. + The wall clock time is the elapsed time from when the Task started running + on a Compute Node to when it finished (or to the last time the statistics + were updated, if the Task had not finished by then). If the Task was + retried, this includes the wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by the Task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the Task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + Task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + Task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the Task. The wait time + for a Task is defined as the elapsed time between the creation of the Task + and the start of Task execution. (If the Task is retried due to failures, + the wait time is the time to the most recent Task execution.). 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(TaskStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_statistics_py3.py new file mode 100644 index 
00000000..c9f5c916 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_statistics_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a Task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the Task. + The wall clock time is the elapsed time from when the Task started running + on a Compute Node to when it finished (or to the last time the statistics + were updated, if the Task had not finished by then). If the Task was + retried, this includes the wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. 
The total number of disk read operations made + by the Task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the Task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + Task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + Task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the Task. The wait time + for a Task is defined as the elapsed time between the creation of the Task + and the start of Task execution. (If the Task is retried due to failures, + the wait time is the time to the most recent Task execution.). + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, 
write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None: + super(TaskStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options.py new file mode 100644 index 00000000..1908a9da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options_py3.py new file mode 100644 index 00000000..d967db3a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_update_options.py b/azext/generated/sdk/batch/v2019_06_01/models/task_update_options.py new file mode 100644 index 00000000..32e1ad82 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_update_options_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_update_options_py3.py new file mode 100644 index 00000000..2a20ddf5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter.py b/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter.py new file mode 100644 index 00000000..dfbcb1c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a Task. 
+ + :param constraints: Constraints that apply to this Task. If omitted, the + Task is given the default constraints. For multi-instance Tasks, updating + the retention time applies only to the primary Task and not subtasks. + :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = kwargs.get('constraints', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter_py3.py new file mode 100644 index 00000000..7341a52d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/task_update_parameter_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a Task. + + :param constraints: Constraints that apply to this Task. If omitted, the + Task is given the default constraints. For multi-instance Tasks, updating + the retention time applies only to the primary Task and not subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, *, constraints=None, **kwargs) -> None: + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = constraints diff --git a/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration.py new file mode 100644 index 00000000..ab05cce9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. 
This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. + :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = kwargs.get('container_url', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration_py3.py new file mode 100644 index 00000000..26270c97 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_configuration_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. 
+ :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, container_url: str, start_time, end_time=None, **kwargs) -> None: + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = container_url + self.start_time = start_time + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result.py b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result.py new file mode 100644 index 00000000..f0928125 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. 
+ :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = kwargs.get('virtual_directory_name', None) + self.number_of_files_uploaded = kwargs.get('number_of_files_uploaded', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result_py3.py new file mode 100644 index 00000000..b23d902e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/upload_batch_service_logs_result_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. 
The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. + :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None: + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = virtual_directory_name + self.number_of_files_uploaded = number_of_files_uploaded diff --git a/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics.py b/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics.py new file mode 100644 index 00000000..848656b9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to Pool usage information. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. 
+ :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated Compute Node cores being part of the Pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.dedicated_core_time = kwargs.get('dedicated_core_time', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics_py3.py new file mode 100644 index 00000000..24ff9e4c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/usage_statistics_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to Pool usage information. 
+ + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated Compute Node cores being part of the Pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None: + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.dedicated_core_time = dedicated_core_time diff --git a/azext/generated/sdk/batch/v2019_06_01/models/user_account.py b/azext/generated/sdk/batch/v2019_06_01/models/user_account.py new file mode 100644 index 00000000..c1f7f276 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/user_account.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute Tasks on an Azure Batch + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user Account. + :type name: str + :param password: Required. The password for the user Account. + :type password: str + :param elevation_level: The elevation level of the user Account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user Account. This property is ignored if specified on a Windows Pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user Account. This property can only be specified if the user is + on a Windows Pool. If not specified and on a Windows Pool, the user is + created with the default options. 
+ :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, **kwargs): + super(UserAccount, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.password = kwargs.get('password', None) + self.elevation_level = kwargs.get('elevation_level', None) + self.linux_user_configuration = kwargs.get('linux_user_configuration', None) + self.windows_user_configuration = kwargs.get('windows_user_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/user_account_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/user_account_py3.py new file mode 100644 index 00000000..fc768441 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/user_account_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute Tasks on an Azure Batch + Compute Node. + + All required parameters must be populated in order to send to Azure. 
+ + :param name: Required. The name of the user Account. + :type name: str + :param password: Required. The password for the user Account. + :type password: str + :param elevation_level: The elevation level of the user Account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user Account. This property is ignored if specified on a Windows Pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user Account. This property can only be specified if the user is + on a Windows Pool. If not specified and on a Windows Pool, the user is + created with the default options. + :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: + super(UserAccount, self).__init__(**kwargs) + self.name = name + self.password = password + self.elevation_level = elevation_level + self.linux_user_configuration = linux_user_configuration + self.windows_user_configuration = windows_user_configuration diff --git a/azext/generated/sdk/batch/v2019_06_01/models/user_identity.py 
b/azext/generated/sdk/batch/v2019_06_01/models/user_identity.py new file mode 100644 index 00000000..ce8ec66e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/user_identity.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the Task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the Task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the Task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, **kwargs): + super(UserIdentity, self).__init__(**kwargs) + self.user_name = kwargs.get('user_name', None) + self.auto_user = kwargs.get('auto_user', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/user_identity_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/user_identity_py3.py new file mode 100644 index 00000000..bf913010 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/user_identity_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the Task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the Task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the Task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: + super(UserIdentity, self).__init__(**kwargs) + self.user_name = user_name + self.auto_user = auto_user diff --git a/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration.py new file mode 100644 index 00000000..0a4c4d6e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch Compute Node + agent to be provisioned on Compute Nodes in the Pool. 
The Batch Compute + Node agent is a program that runs on each Compute Node in the Pool, and + provides the command-and-control interface between the Compute Node and + the Batch service. There are different implementations of the Compute Node + agent, known as SKUs, for different operating systems. You must specify a + Compute Node agent SKU which matches the selected Image reference. To get + the list of supported Compute Node agent SKUs along with their list of + verified Image references, see the 'List supported Compute Node agent + SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS Image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + Compute Nodes in the Pool. This property must be specified if the Compute + Nodes in the Pool need to have empty data disks attached to them. This + cannot be updated. Each Compute Node gets its own disk (the disk is not a + file share). Existing disks cannot be attached, each attached disk is + empty. When the Compute Node is removed from the Pool, the disk and all + data associated with it is also deleted. The disk is not formatted after + being attached, it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. 
This only applies to Images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the Compute Nodes which will be deployed. If + omitted, no on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the Pool. + If specified, setup is performed on each Compute Node in the Pool to allow + Tasks to run in containers. All regular Tasks and Job manager Tasks run on + this Pool must specify the containerSettings property, and all other Tasks + may specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, **kwargs): + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = kwargs.get('image_reference', None) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.windows_configuration = kwargs.get('windows_configuration', None) + self.data_disks = kwargs.get('data_disks', None) + self.license_type = kwargs.get('license_type', None) + self.container_configuration = kwargs.get('container_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration_py3.py 
b/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration_py3.py new file mode 100644 index 00000000..29a7e9d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/virtual_machine_configuration_py3.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch Compute Node + agent to be provisioned on Compute Nodes in the Pool. The Batch Compute + Node agent is a program that runs on each Compute Node in the Pool, and + provides the command-and-control interface between the Compute Node and + the Batch service. There are different implementations of the Compute Node + agent, known as SKUs, for different operating systems. You must specify a + Compute Node agent SKU which matches the selected Image reference. To get + the list of supported Compute Node agent SKUs along with their list of + verified Image references, see the 'List supported Compute Node agent + SKUs' operation. 
+ :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS Image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + Compute Nodes in the Pool. This property must be specified if the Compute + Nodes in the Pool need to have empty data disks attached to them. This + cannot be updated. Each Compute Node gets its own disk (the disk is not a + file share). Existing disks cannot be attached, each attached disk is + empty. When the Compute Node is removed from the Pool, the disk and all + data associated with it is also deleted. The disk is not formatted after + being attached, it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to Images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the Compute Nodes which will be deployed. If + omitted, no on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the Pool. + If specified, setup is performed on each Compute Node in the Pool to allow + Tasks to run in containers. 
All regular Tasks and Job manager Tasks run on + this Pool must specify the containerSettings property, and all other Tasks + may specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = image_reference + self.node_agent_sku_id = node_agent_sku_id + self.windows_configuration = windows_configuration + self.data_disks = data_disks + self.license_type = license_type + self.container_configuration = container_configuration diff --git a/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration.py new file mode 100644 index 00000000..6b27533d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration_py3.py new file mode 100644 index 00000000..40a4aedf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/windows_configuration_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. 
+ :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = enable_automatic_updates diff --git a/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration.py b/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration.py new file mode 100644 index 00000000..7695d88d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user Account on a Windows Compute Node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration Pools is 'batch' and for + CloudServiceConfiguration Pools is 'interactive'. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, **kwargs): + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = kwargs.get('login_mode', None) diff --git a/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration_py3.py b/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration_py3.py new file mode 100644 index 00000000..7eaf424f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/models/windows_user_configuration_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user Account on a Windows Compute Node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration Pools is 'batch' and for + CloudServiceConfiguration Pools is 'interactive'. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, *, login_mode=None, **kwargs) -> None: + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = login_mode diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/__init__.py b/azext/generated/sdk/batch/v2019_06_01/operations/__init__.py new file mode 100644 index 00000000..5b1c54cc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .application_operations import ApplicationOperations +from .pool_operations import PoolOperations +from .account_operations import AccountOperations +from .job_operations import JobOperations +from .certificate_operations import CertificateOperations +from .file_operations import FileOperations +from .job_schedule_operations import JobScheduleOperations +from .task_operations import TaskOperations +from .compute_node_operations import ComputeNodeOperations + +__all__ = [ + 'ApplicationOperations', + 'PoolOperations', + 'AccountOperations', + 'JobOperations', + 'CertificateOperations', + 'FileOperations', + 'JobScheduleOperations', + 'TaskOperations', + 'ComputeNodeOperations', +] diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/account_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/account_operations.py new file mode 100644 index 00000000..59d82220 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/account_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class AccountOperations(object): + """AccountOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. 
Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def list_supported_images( + self, account_list_supported_images_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all Virtual Machine Images supported by the Azure Batch service. + + :param account_list_supported_images_options: Additional parameters + for the operation + :type account_list_supported_images_options: + ~azure.batch.models.AccountListSupportedImagesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ImageInformation + :rtype: + ~azure.batch.models.ImageInformationPaged[~azure.batch.models.ImageInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_supported_images_options is not None: + filter = account_list_supported_images_options.filter + max_results = None + if account_list_supported_images_options is not None: + max_results = account_list_supported_images_options.max_results + timeout = None + if account_list_supported_images_options is not None: + timeout = account_list_supported_images_options.timeout + client_request_id = None + if account_list_supported_images_options is not None: + client_request_id = account_list_supported_images_options.client_request_id + return_client_request_id = None + if account_list_supported_images_options is not None: + return_client_request_id = account_list_supported_images_options.return_client_request_id + ocp_date = None + if account_list_supported_images_options is not None: + ocp_date = account_list_supported_images_options.ocp_date + + def 
internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_supported_images.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ImageInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ImageInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_supported_images.metadata = {'url': '/supportedimages'} + + def list_pool_node_counts( + self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the number of Compute Nodes in each state, grouped by Pool. + + :param account_list_pool_node_counts_options: Additional parameters + for the operation + :type account_list_pool_node_counts_options: + ~azure.batch.models.AccountListPoolNodeCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of PoolNodeCounts + :rtype: + ~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_pool_node_counts_options is not None: + filter = account_list_pool_node_counts_options.filter + max_results = None + if account_list_pool_node_counts_options is not None: + max_results = account_list_pool_node_counts_options.max_results + timeout = None + if account_list_pool_node_counts_options is not None: + timeout = account_list_pool_node_counts_options.timeout + client_request_id = None + if account_list_pool_node_counts_options is not None: + client_request_id = account_list_pool_node_counts_options.client_request_id + return_client_request_id = None + if account_list_pool_node_counts_options is not None: + return_client_request_id = account_list_pool_node_counts_options.return_client_request_id + ocp_date = None + if account_list_pool_node_counts_options is not None: + ocp_date = account_list_pool_node_counts_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_pool_node_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers 
+ header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_pool_node_counts.metadata = {'url': '/nodecounts'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/application_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/application_operations.py new file mode 100644 index 00000000..4553a0ce --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/application_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ApplicationOperations(object): + """ApplicationOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def list( + self, application_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the applications available in the specified Account. + + This operation returns only Applications and versions that are + available for use on Compute Nodes; that is, that can be used in a + Package reference. For administrator information about Applications and + versions that are not yet available to Compute Nodes, use the Azure + portal or the Azure Resource Manager API. + + :param application_list_options: Additional parameters for the + operation + :type application_list_options: + ~azure.batch.models.ApplicationListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of ApplicationSummary + :rtype: + ~azure.batch.models.ApplicationSummaryPaged[~azure.batch.models.ApplicationSummary] + :raises: + :class:`BatchErrorException` + """ + max_results = None + if application_list_options is not None: + max_results = application_list_options.max_results + timeout = None + if application_list_options is not None: + timeout = application_list_options.timeout + client_request_id = None + if application_list_options is not None: + client_request_id = application_list_options.client_request_id + return_client_request_id = None + if application_list_options is not None: + return_client_request_id = application_list_options.return_client_request_id + ocp_date = None + if application_list_options is not None: + ocp_date = application_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/applications'} + + def get( + self, application_id, application_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Application. + + This operation returns only Applications and versions that are + available for use on Compute Nodes; that is, that can be used in a + Package reference. For administrator information about Applications and + versions that are not yet available to Compute Nodes, use the Azure + portal or the Azure Resource Manager API. + + :param application_id: The ID of the Application. 
+ :type application_id: str + :param application_get_options: Additional parameters for the + operation + :type application_get_options: + ~azure.batch.models.ApplicationGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationSummary or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ApplicationSummary or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if application_get_options is not None: + timeout = application_get_options.timeout + client_request_id = None + if application_get_options is not None: + client_request_id = application_get_options.client_request_id + return_client_request_id = None + if application_get_options is not None: + return_client_request_id = application_get_options.return_client_request_id + ocp_date = None + if application_get_options is not None: + ocp_date = application_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'applicationId': self._serialize.url("application_id", application_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ApplicationSummary', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/applications/{applicationId}'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/certificate_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/certificate_operations.py new file mode 100644 index 00000000..0f633288 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/certificate_operations.py @@ -0,0 +1,514 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class CertificateOperations(object): + """CertificateOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def add( + self, certificate, certificate_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Certificate to the specified Account. + + :param certificate: The Certificate to be added. + :type certificate: ~azure.batch.models.CertificateAddParameter + :param certificate_add_options: Additional parameters for the + operation + :type certificate_add_options: + ~azure.batch.models.CertificateAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_add_options is not None: + timeout = certificate_add_options.timeout + client_request_id = None + if certificate_add_options is not None: + client_request_id = certificate_add_options.client_request_id + return_client_request_id = None + if certificate_add_options is not None: + return_client_request_id = certificate_add_options.return_client_request_id + ocp_date = None + if certificate_add_options is not None: + ocp_date = certificate_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(certificate, 'CertificateAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/certificates'} + + def list( + self, certificate_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Certificates that have been added to the specified + Account. + + :param certificate_list_options: Additional parameters for the + operation + :type certificate_list_options: + ~azure.batch.models.CertificateListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Certificate + :rtype: + ~azure.batch.models.CertificatePaged[~azure.batch.models.Certificate] + :raises: + :class:`BatchErrorException` + """ + filter = None + if certificate_list_options is not None: + filter = certificate_list_options.filter + select = None + if certificate_list_options is not None: + select = certificate_list_options.select + max_results = None + if certificate_list_options is not None: + max_results = certificate_list_options.max_results + timeout = None + if certificate_list_options is not None: + timeout = certificate_list_options.timeout + client_request_id = None + if certificate_list_options is not None: + client_request_id = certificate_list_options.client_request_id + return_client_request_id = None + if certificate_list_options is not None: + return_client_request_id = certificate_list_options.return_client_request_id + ocp_date = None + if certificate_list_options is not None: + ocp_date = certificate_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = 
{} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/certificates'} + + def cancel_deletion( + self, thumbprint_algorithm, thumbprint, certificate_cancel_deletion_options=None, custom_headers=None, raw=False, **operation_config): + """Cancels a failed deletion of a Certificate from the specified Account. + + If you try to delete a Certificate that is being used by a Pool or + Compute Node, the status of the Certificate changes to deleteFailed. 
If + you decide that you want to continue using the Certificate, you can use + this operation to set the status of the Certificate back to active. If + you intend to delete the Certificate, you do not need to run this + operation after the deletion failed. You must make sure that the + Certificate is not being used by any resources, and then you can try + again to delete the Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate being deleted. + :type thumbprint: str + :param certificate_cancel_deletion_options: Additional parameters for + the operation + :type certificate_cancel_deletion_options: + ~azure.batch.models.CertificateCancelDeletionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_cancel_deletion_options is not None: + timeout = certificate_cancel_deletion_options.timeout + client_request_id = None + if certificate_cancel_deletion_options is not None: + client_request_id = certificate_cancel_deletion_options.client_request_id + return_client_request_id = None + if certificate_cancel_deletion_options is not None: + return_client_request_id = certificate_cancel_deletion_options.return_client_request_id + ocp_date = None + if certificate_cancel_deletion_options is not None: + ocp_date = certificate_cancel_deletion_options.ocp_date + + # Construct URL + url = self.cancel_deletion.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if 
return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + cancel_deletion.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete'} + + def delete( + self, thumbprint_algorithm, thumbprint, certificate_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Certificate from the specified Account. + + You cannot delete a Certificate if a resource (Pool or Compute Node) is + using it. Before you can delete a Certificate, you must therefore make + sure that the Certificate is not associated with any existing Pools, + the Certificate is not installed on any Nodes (even if you remove a + Certificate from a Pool, it is not removed from existing Compute Nodes + in that Pool until they restart), and no running Tasks depend on the + Certificate. If you try to delete a Certificate that is in use, the + deletion fails. The Certificate status changes to deleteFailed. You can + use Cancel Delete Certificate to set the status back to active if you + decide that you want to continue using the Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. 
+ :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate to be deleted. + :type thumbprint: str + :param certificate_delete_options: Additional parameters for the + operation + :type certificate_delete_options: + ~azure.batch.models.CertificateDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_delete_options is not None: + timeout = certificate_delete_options.timeout + client_request_id = None + if certificate_delete_options is not None: + client_request_id = certificate_delete_options.client_request_id + return_client_request_id = None + if certificate_delete_options is not None: + return_client_request_id = certificate_delete_options.return_client_request_id + ocp_date = None + if certificate_delete_options is not None: + ocp_date = certificate_delete_options.ocp_date + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + delete.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} + + def get( + self, thumbprint_algorithm, thumbprint, certificate_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate to get. 
+ :type thumbprint: str + :param certificate_get_options: Additional parameters for the + operation + :type certificate_get_options: + ~azure.batch.models.CertificateGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Certificate or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.Certificate or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if certificate_get_options is not None: + select = certificate_get_options.select + timeout = None + if certificate_get_options is not None: + timeout = certificate_get_options.timeout + client_request_id = None + if certificate_get_options is not None: + client_request_id = certificate_get_options.client_request_id + return_client_request_id = None + if certificate_get_options is not None: + return_client_request_id = certificate_get_options.return_client_request_id + ocp_date = None + if certificate_get_options is not None: + ocp_date = certificate_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + 
header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('Certificate', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/compute_node_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/compute_node_operations.py new file mode 100644 index 00000000..6d21f23f --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/operations/compute_node_operations.py @@ -0,0 +1,1242 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ComputeNodeOperations(object): + """ComputeNodeOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def add_user( + self, pool_id, node_id, user, compute_node_add_user_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a user Account to the specified Compute Node. + + You can add a user Account to a Compute Node only when it is in the + idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a + user Account. + :type node_id: str + :param user: The user Account to be created. 
+ :type user: ~azure.batch.models.ComputeNodeUser + :param compute_node_add_user_options: Additional parameters for the + operation + :type compute_node_add_user_options: + ~azure.batch.models.ComputeNodeAddUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_add_user_options is not None: + timeout = compute_node_add_user_options.timeout + client_request_id = None + if compute_node_add_user_options is not None: + client_request_id = compute_node_add_user_options.client_request_id + return_client_request_id = None + if compute_node_add_user_options is not None: + return_client_request_id = compute_node_add_user_options.return_client_request_id + ocp_date = None + if compute_node_add_user_options is not None: + ocp_date = compute_node_add_user_options.ocp_date + + # Construct URL + url = self.add_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(user, 'ComputeNodeUser') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users'} + + def delete_user( + self, pool_id, node_id, user_name, compute_node_delete_user_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a user Account from the specified Compute Node. + + You can delete a user Account to a Compute Node only when it is in the + idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a + user Account. 
+ :type node_id: str + :param user_name: The name of the user Account to delete. + :type user_name: str + :param compute_node_delete_user_options: Additional parameters for the + operation + :type compute_node_delete_user_options: + ~azure.batch.models.ComputeNodeDeleteUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_delete_user_options is not None: + timeout = compute_node_delete_user_options.timeout + client_request_id = None + if compute_node_delete_user_options is not None: + client_request_id = compute_node_delete_user_options.client_request_id + return_client_request_id = None + if compute_node_delete_user_options is not None: + return_client_request_id = compute_node_delete_user_options.return_client_request_id + ocp_date = None + if compute_node_delete_user_options is not None: + ocp_date = compute_node_delete_user_options.ocp_date + + # Construct URL + url = self.delete_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def update_user( + self, pool_id, node_id, user_name, node_update_user_parameter, compute_node_update_user_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the password and expiration time of a user Account on the + specified Compute Node. + + This operation replaces of all the updatable properties of the Account. + For example, if the expiryTime element is not specified, the current + value is replaced with the default value, not left unmodified. You can + update a user Account on a Compute Node only when it is in the idle or + running state. 
+ + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to update a + user Account. + :type node_id: str + :param user_name: The name of the user Account to update. + :type user_name: str + :param node_update_user_parameter: The parameters for the request. + :type node_update_user_parameter: + ~azure.batch.models.NodeUpdateUserParameter + :param compute_node_update_user_options: Additional parameters for the + operation + :type compute_node_update_user_options: + ~azure.batch.models.ComputeNodeUpdateUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_update_user_options is not None: + timeout = compute_node_update_user_options.timeout + client_request_id = None + if compute_node_update_user_options is not None: + client_request_id = compute_node_update_user_options.client_request_id + return_client_request_id = None + if compute_node_update_user_options is not None: + return_client_request_id = compute_node_update_user_options.return_client_request_id + ocp_date = None + if compute_node_update_user_options is not None: + ocp_date = compute_node_update_user_options.ocp_date + + # Construct URL + url = self.update_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, 
**path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_update_user_parameter, 'NodeUpdateUserParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def get( + 
self, pool_id, node_id, compute_node_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to get + information about. + :type node_id: str + :param compute_node_get_options: Additional parameters for the + operation + :type compute_node_get_options: + ~azure.batch.models.ComputeNodeGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNode or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ComputeNode or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if compute_node_get_options is not None: + select = compute_node_get_options.select + timeout = None + if compute_node_get_options is not None: + timeout = compute_node_get_options.timeout + client_request_id = None + if compute_node_get_options is not None: + client_request_id = compute_node_get_options.client_request_id + return_client_request_id = None + if compute_node_get_options is not None: + return_client_request_id = compute_node_get_options.return_client_request_id + ocp_date = None + if compute_node_get_options is not None: + ocp_date = compute_node_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNode', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}'} + + def reboot( + self, pool_id, node_id, 
node_reboot_option=None, compute_node_reboot_options=None, custom_headers=None, raw=False, **operation_config): + """Restarts the specified Compute Node. + + You can restart a Compute Node only if it is in an idle or running + state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. + :type node_id: str + :param node_reboot_option: When to reboot the Compute Node and what to + do with currently running Tasks. The default value is requeue. + Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + :param compute_node_reboot_options: Additional parameters for the + operation + :type compute_node_reboot_options: + ~azure.batch.models.ComputeNodeRebootOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reboot_options is not None: + timeout = compute_node_reboot_options.timeout + client_request_id = None + if compute_node_reboot_options is not None: + client_request_id = compute_node_reboot_options.client_request_id + return_client_request_id = None + if compute_node_reboot_options is not None: + return_client_request_id = compute_node_reboot_options.return_client_request_id + ocp_date = None + if compute_node_reboot_options is not None: + ocp_date = compute_node_reboot_options.ocp_date + node_reboot_parameter = None + if node_reboot_option is not None: + node_reboot_parameter = models.NodeRebootParameter(node_reboot_option=node_reboot_option) + + # Construct URL + url = self.reboot.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not 
None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reboot_parameter is not None: + body_content = self._serialize.body(node_reboot_parameter, 'NodeRebootParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reboot.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reboot'} + + def reimage( + self, pool_id, node_id, node_reimage_option=None, compute_node_reimage_options=None, custom_headers=None, raw=False, **operation_config): + """Reinstalls the operating system on the specified Compute Node. + + You can reinstall the operating system on a Compute Node only if it is + in an idle or running state. This API can be invoked only on Pools + created with the cloud service configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. + :type node_id: str + :param node_reimage_option: When to reimage the Compute Node and what + to do with currently running Tasks. The default value is requeue. 
+ Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + :param compute_node_reimage_options: Additional parameters for the + operation + :type compute_node_reimage_options: + ~azure.batch.models.ComputeNodeReimageOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reimage_options is not None: + timeout = compute_node_reimage_options.timeout + client_request_id = None + if compute_node_reimage_options is not None: + client_request_id = compute_node_reimage_options.client_request_id + return_client_request_id = None + if compute_node_reimage_options is not None: + return_client_request_id = compute_node_reimage_options.return_client_request_id + ocp_date = None + if compute_node_reimage_options is not None: + ocp_date = compute_node_reimage_options.ocp_date + node_reimage_parameter = None + if node_reimage_option is not None: + node_reimage_parameter = models.NodeReimageParameter(node_reimage_option=node_reimage_option) + + # Construct URL + url = self.reimage.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reimage_parameter is not None: + body_content = self._serialize.body(node_reimage_parameter, 'NodeReimageParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reimage.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reimage'} + + def disable_scheduling( + self, pool_id, node_id, node_disable_scheduling_option=None, compute_node_disable_scheduling_options=None, custom_headers=None, raw=False, 
            **operation_config):
        """Disables Task scheduling on the specified Compute Node.

        You can disable Task scheduling on a Compute Node only if its current
        scheduling state is enabled.

        :param pool_id: The ID of the Pool that contains the Compute Node.
        :type pool_id: str
        :param node_id: The ID of the Compute Node on which you want to
         disable Task scheduling.
        :type node_id: str
        :param node_disable_scheduling_option: What to do with currently
         running Tasks when disabling Task scheduling on the Compute Node. The
         default value is requeue. Possible values include: 'requeue',
         'terminate', 'taskCompletion'
        :type node_disable_scheduling_option: str or
         ~azure.batch.models.DisableComputeNodeSchedulingOption
        :param compute_node_disable_scheduling_options: Additional parameters
         for the operation
        :type compute_node_disable_scheduling_options:
         ~azure.batch.models.ComputeNodeDisableSchedulingOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into the individual
        # query/header values the wire protocol expects.
        timeout = None
        if compute_node_disable_scheduling_options is not None:
            timeout = compute_node_disable_scheduling_options.timeout
        client_request_id = None
        if compute_node_disable_scheduling_options is not None:
            client_request_id = compute_node_disable_scheduling_options.client_request_id
        return_client_request_id = None
        if compute_node_disable_scheduling_options is not None:
            return_client_request_id = compute_node_disable_scheduling_options.return_client_request_id
        ocp_date = None
        if compute_node_disable_scheduling_options is not None:
            ocp_date = compute_node_disable_scheduling_options.ocp_date
        # Wrap the bare enum value in the request-body model only when the
        # caller supplied one; otherwise no body is sent.
        node_disable_scheduling_parameter = None
        if node_disable_scheduling_option is not None:
            node_disable_scheduling_parameter = models.NodeDisableSchedulingParameter(node_disable_scheduling_option=node_disable_scheduling_option)

        # Construct URL
        url = self.disable_scheduling.metadata['url']
        path_format_arguments = {
            'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
            'poolId': self._serialize.url("pool_id", pool_id, 'str'),
            'nodeId': self._serialize.url("node_id", node_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
        if self.config.generate_client_request_id:
            # Auto-generated id; a caller-supplied client_request_id below
            # deliberately overwrites this entry.
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct body
        if node_disable_scheduling_parameter is not None:
            body_content = self._serialize.body(node_disable_scheduling_parameter, 'NodeDisableSchedulingParameter')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        # Service signals success with 200 for this operation.
        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'client-request-id': 'str',
                'request-id': 'str',
                'ETag': 'str',
                'Last-Modified': 'rfc-1123',
                'DataServiceId': 'str',
            })
            return client_raw_response
    disable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/disablescheduling'}

    def enable_scheduling(
            self, pool_id, node_id, compute_node_enable_scheduling_options=None, custom_headers=None, raw=False, **operation_config):
        """Enables Task scheduling on the specified Compute Node.

        You can enable Task scheduling on a Compute Node only if its current
        scheduling state is disabled.

        :param pool_id: The ID of the Pool that contains the Compute Node.
        :type pool_id: str
        :param node_id: The ID of the Compute Node on which you want to enable
         Task scheduling.
        :type node_id: str
        :param compute_node_enable_scheduling_options: Additional parameters
         for the operation
        :type compute_node_enable_scheduling_options:
         ~azure.batch.models.ComputeNodeEnableSchedulingOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into individual values.
        timeout = None
        if compute_node_enable_scheduling_options is not None:
            timeout = compute_node_enable_scheduling_options.timeout
        client_request_id = None
        if compute_node_enable_scheduling_options is not None:
            client_request_id = compute_node_enable_scheduling_options.client_request_id
        return_client_request_id = None
        if compute_node_enable_scheduling_options is not None:
            return_client_request_id = compute_node_enable_scheduling_options.return_client_request_id
        ocp_date = None
        if compute_node_enable_scheduling_options is not None:
            ocp_date = compute_node_enable_scheduling_options.ocp_date

        # Construct URL
        url = self.enable_scheduling.metadata['url']
        path_format_arguments = {
            'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
            'poolId': self._serialize.url("pool_id", pool_id, 'str'),
            'nodeId': self._serialize.url("node_id", node_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        # Note: unlike disable_scheduling, this POST carries no request body,
        # so no Content-Type header is set.
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct and send request (bodiless POST)
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            client_raw_response.add_headers({
                'client-request-id': 'str',
                'request-id': 'str',
                'ETag': 'str',
                'Last-Modified': 'rfc-1123',
                'DataServiceId': 'str',
            })
            return client_raw_response
    enable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/enablescheduling'}

    def get_remote_login_settings(
            self, pool_id, node_id, compute_node_get_remote_login_settings_options=None, custom_headers=None, raw=False, **operation_config):
        """Gets the settings required for remote login to a Compute Node.

        Before you can remotely login to a Compute Node using the remote login
        settings, you must create a user Account on the Compute Node. This API
        can be invoked only on Pools created with the virtual machine
        configuration property. For Pools created with a cloud service
        configuration, see the GetRemoteDesktop API.

        :param pool_id: The ID of the Pool that contains the Compute Node.
        :type pool_id: str
        :param node_id: The ID of the Compute Node for which to obtain the
         remote login settings.
        :type node_id: str
        :param compute_node_get_remote_login_settings_options: Additional
         parameters for the operation
        :type compute_node_get_remote_login_settings_options:
         ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: ComputeNodeGetRemoteLoginSettingsResult or ClientRawResponse
         if raw=true
        :rtype: ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into individual values.
        timeout = None
        if compute_node_get_remote_login_settings_options is not None:
            timeout = compute_node_get_remote_login_settings_options.timeout
        client_request_id = None
        if compute_node_get_remote_login_settings_options is not None:
            client_request_id = compute_node_get_remote_login_settings_options.client_request_id
        return_client_request_id = None
        if compute_node_get_remote_login_settings_options is not None:
            return_client_request_id = compute_node_get_remote_login_settings_options.return_client_request_id
        ocp_date = None
        if compute_node_get_remote_login_settings_options is not None:
            ocp_date = compute_node_get_remote_login_settings_options.ocp_date

        # Construct URL
        url = self.get_remote_login_settings.metadata['url']
        path_format_arguments = {
            'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
            'poolId': self._serialize.url("pool_id", pool_id, 'str'),
            'nodeId': self._serialize.url("node_id", node_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('ComputeNodeGetRemoteLoginSettingsResult', response)
            header_dict = {
                'client-request-id': 'str',
                'request-id': 'str',
                'ETag': 'str',
                'Last-Modified': 'rfc-1123',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized
    get_remote_login_settings.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/remoteloginsettings'}

    def get_remote_desktop(
            self, pool_id, node_id,
            compute_node_get_remote_desktop_options=None, custom_headers=None, raw=False, callback=None, **operation_config):
        """Gets the Remote Desktop Protocol file for the specified Compute Node.

        Before you can access a Compute Node by using the RDP file, you must
        create a user Account on the Compute Node. This API can only be invoked
        on Pools created with a cloud service configuration. For Pools created
        with a virtual machine configuration, see the GetRemoteLoginSettings
        API.

        :param pool_id: The ID of the Pool that contains the Compute Node.
        :type pool_id: str
        :param node_id: The ID of the Compute Node for which you want to get
         the Remote Desktop Protocol file.
        :type node_id: str
        :param compute_node_get_remote_desktop_options: Additional parameters
         for the operation
        :type compute_node_get_remote_desktop_options:
         ~azure.batch.models.ComputeNodeGetRemoteDesktopOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param callback: When specified, will be called with each chunk of
         data that is streamed. The callback should take two arguments, the
         bytes of the current chunk of data and the response object. If the
         data is uploading, response will be None.
        :type callback: Callable[Bytes, response=None]
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: object or ClientRawResponse if raw=true
        :rtype: Generator or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into individual values.
        timeout = None
        if compute_node_get_remote_desktop_options is not None:
            timeout = compute_node_get_remote_desktop_options.timeout
        client_request_id = None
        if compute_node_get_remote_desktop_options is not None:
            client_request_id = compute_node_get_remote_desktop_options.client_request_id
        return_client_request_id = None
        if compute_node_get_remote_desktop_options is not None:
            return_client_request_id = compute_node_get_remote_desktop_options.return_client_request_id
        ocp_date = None
        if compute_node_get_remote_desktop_options is not None:
            ocp_date = compute_node_get_remote_desktop_options.ocp_date

        # Construct URL
        url = self.get_remote_desktop.metadata['url']
        path_format_arguments = {
            'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
            'poolId': self._serialize.url("pool_id", pool_id, 'str'),
            'nodeId': self._serialize.url("node_id", node_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct and send request.
        # stream=True so the RDP file is not buffered in memory; the body is
        # exposed as a download generator below.
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=True, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._client.stream_download(response, callback)
            header_dict = {
                'client-request-id': 'str',
                'request-id': 'str',
                'ETag': 'str',
                'Last-Modified': 'rfc-1123',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized
    get_remote_desktop.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/rdp'}

    def upload_batch_service_logs(
            self, pool_id, node_id, upload_batch_service_logs_configuration, compute_node_upload_batch_service_logs_options=None, custom_headers=None, raw=False, **operation_config):
        """Upload Azure Batch service log files from the specified Compute Node to
        Azure Blob Storage.

        This is for gathering Azure Batch service log files in an automated
        fashion from Compute Nodes if you are experiencing an error and wish to
        escalate to Azure support. The Azure Batch service log files should be
        shared with Azure support to aid in debugging issues with the Batch
        service.

        :param pool_id: The ID of the Pool that contains the Compute Node.
        :type pool_id: str
        :param node_id: The ID of the Compute Node from which you want to
         upload the Azure Batch service log files.
        :type node_id: str
        :param upload_batch_service_logs_configuration: The Azure Batch
         service log files upload configuration.
        :type upload_batch_service_logs_configuration:
         ~azure.batch.models.UploadBatchServiceLogsConfiguration
        :param compute_node_upload_batch_service_logs_options: Additional
         parameters for the operation
        :type compute_node_upload_batch_service_logs_options:
         ~azure.batch.models.ComputeNodeUploadBatchServiceLogsOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: UploadBatchServiceLogsResult or ClientRawResponse if raw=true
        :rtype: ~azure.batch.models.UploadBatchServiceLogsResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into individual values.
        timeout = None
        if compute_node_upload_batch_service_logs_options is not None:
            timeout = compute_node_upload_batch_service_logs_options.timeout
        client_request_id = None
        if compute_node_upload_batch_service_logs_options is not None:
            client_request_id = compute_node_upload_batch_service_logs_options.client_request_id
        return_client_request_id = None
        if compute_node_upload_batch_service_logs_options is not None:
            return_client_request_id = compute_node_upload_batch_service_logs_options.return_client_request_id
        ocp_date = None
        if compute_node_upload_batch_service_logs_options is not None:
            ocp_date = compute_node_upload_batch_service_logs_options.ocp_date

        # Construct URL
        url = self.upload_batch_service_logs.metadata['url']
        path_format_arguments = {
            'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
            'poolId': self._serialize.url("pool_id", pool_id, 'str'),
            'nodeId': self._serialize.url("node_id", node_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct body (configuration is a required parameter, so the body
        # is always serialized, unlike the optional-body operations above).
        body_content = self._serialize.body(upload_batch_service_logs_configuration, 'UploadBatchServiceLogsConfiguration')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        deserialized = None
        header_dict = {}

        if response.status_code == 200:
            deserialized = self._deserialize('UploadBatchServiceLogsResult', response)
            header_dict = {
                'client-request-id': 'str',
                'request-id': 'str',
            }

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            client_raw_response.add_headers(header_dict)
            return client_raw_response

        return deserialized
    upload_batch_service_logs.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs'}

    def list(
            self, pool_id, compute_node_list_options=None, custom_headers=None, raw=False, **operation_config):
        """Lists the Compute Nodes in the specified Pool.

        :param pool_id: The ID of the Pool from which you want to list Compute
         Nodes.
        :type pool_id: str
        :param compute_node_list_options: Additional parameters for the
         operation
        :type compute_node_list_options:
         ~azure.batch.models.ComputeNodeListOptions
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: An iterator like instance of ComputeNode
        :rtype:
         ~azure.batch.models.ComputeNodePaged[~azure.batch.models.ComputeNode]
        :raises:
         :class:`BatchErrorException`
        """
        # Flatten the optional request-options bag into individual values
        # captured by the paging closure below.
        filter = None
        if compute_node_list_options is not None:
            filter = compute_node_list_options.filter
        select = None
        if compute_node_list_options is not None:
            select = compute_node_list_options.select
        max_results = None
        if compute_node_list_options is not None:
            max_results = compute_node_list_options.max_results
        timeout = None
        if compute_node_list_options is not None:
            timeout = compute_node_list_options.timeout
        client_request_id = None
        if compute_node_list_options is not None:
            client_request_id = compute_node_list_options.client_request_id
        return_client_request_id = None
        if compute_node_list_options is not None:
            return_client_request_id = compute_node_list_options.return_client_request_id
        ocp_date = None
        if compute_node_list_options is not None:
            ocp_date = compute_node_list_options.ocp_date

        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url =
self.list.metadata['url']
                path_format_arguments = {
                    'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
                    'poolId': self._serialize.url("pool_id", pool_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
                if timeout is not None:
                    query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

            else:
                # Follow-up pages: the service returns a fully-qualified
                # nextLink, so no additional query parameters are appended.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            if client_request_id is not None:
                header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
            if return_client_request_id is not None:
                header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
            if ocp_date is not None:
                header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                raise models.BatchErrorException(self._deserialize, response)

            return response

        # Deserialize response: the paged iterator drives internal_paging
        # lazily as the caller advances through pages.
        deserialized = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list.metadata = {'url': '/pools/{poolId}/nodes'}
diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/file_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/file_operations.py
new file mode 100644
index 00000000..181c8619
--- /dev/null
+++ b/azext/generated/sdk/batch/v2019_06_01/operations/file_operations.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

import uuid
from msrest.pipeline import ClientRawResponse

from .. import models


class FileOperations(object):
    """FileOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0".
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def delete_from_task( + self, job_id, task_id, file_path, recursive=None, file_delete_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the specified Task file from the Compute Node where the Task + ran. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to delete. + :type task_id: str + :param file_path: The path to the Task file or directory that you want + to delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_task_options: Additional parameters for the + operation + :type file_delete_from_task_options: + ~azure.batch.models.FileDeleteFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_task_options is not None: + timeout = file_delete_from_task_options.timeout + client_request_id = None + if file_delete_from_task_options is not None: + client_request_id = file_delete_from_task_options.client_request_id + return_client_request_id = None + if file_delete_from_task_options is not None: + return_client_request_id = file_delete_from_task_options.return_client_request_id + ocp_date = None + if file_delete_from_task_options is not None: + ocp_date = file_delete_from_task_options.ocp_date + + # Construct URL + url = self.delete_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = 
self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_from_task( + self, job_id, task_id, file_path, file_get_from_task_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. + :type task_id: str + :param file_path: The path to the Task file that you want to get the + content of. + :type file_path: str + :param file_get_from_task_options: Additional parameters for the + operation + :type file_get_from_task_options: + ~azure.batch.models.FileGetFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. 
+ :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_task_options is not None: + timeout = file_get_from_task_options.timeout + client_request_id = None + if file_get_from_task_options is not None: + client_request_id = file_get_from_task_options.client_request_id + return_client_request_id = None + if file_get_from_task_options is not None: + return_client_request_id = file_get_from_task_options.return_client_request_id + ocp_date = None + if file_get_from_task_options is not None: + ocp_date = file_get_from_task_options.ocp_date + ocp_range = None + if file_get_from_task_options is not None: + ocp_range = file_get_from_task_options.ocp_range + if_modified_since = None + if file_get_from_task_options is not None: + if_modified_since = file_get_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_from_task_options is not None: + if_unmodified_since = file_get_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 
'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_properties_from_task( + self, job_id, task_id, file_path, file_get_properties_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to get the + properties of. + :type task_id: str + :param file_path: The path to the Task file that you want to get the + properties of. + :type file_path: str + :param file_get_properties_from_task_options: Additional parameters + for the operation + :type file_get_properties_from_task_options: + ~azure.batch.models.FileGetPropertiesFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_task_options is not None: + timeout = file_get_properties_from_task_options.timeout + client_request_id = None + if file_get_properties_from_task_options is not None: + client_request_id = file_get_properties_from_task_options.client_request_id + return_client_request_id = None + if file_get_properties_from_task_options is not None: + return_client_request_id = file_get_properties_from_task_options.return_client_request_id + ocp_date = None + if file_get_properties_from_task_options is not None: + ocp_date = file_get_properties_from_task_options.ocp_date + if_modified_since = None + if file_get_properties_from_task_options is not None: + if_modified_since = file_get_properties_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_task_options is not None: + if_unmodified_since = file_get_properties_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def delete_from_compute_node( + self, pool_id, node_id, file_path, recursive=None, file_delete_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the 
specified file from the Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node from which you want to + delete the file. + :type node_id: str + :param file_path: The path to the file or directory that you want to + delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_compute_node_options: Additional parameters + for the operation + :type file_delete_from_compute_node_options: + ~azure.batch.models.FileDeleteFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_compute_node_options is not None: + timeout = file_delete_from_compute_node_options.timeout + client_request_id = None + if file_delete_from_compute_node_options is not None: + client_request_id = file_delete_from_compute_node_options.client_request_id + return_client_request_id = None + if file_delete_from_compute_node_options is not None: + return_client_request_id = file_delete_from_compute_node_options.return_client_request_id + ocp_date = None + if file_delete_from_compute_node_options is not None: + ocp_date = file_delete_from_compute_node_options.ocp_date + + # Construct URL + url = self.delete_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_from_compute_node( + self, pool_id, node_id, file_path, file_get_from_compute_node_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the file. + :type node_id: str + :param file_path: The path to the Compute Node file that you want to + get the content of. + :type file_path: str + :param file_get_from_compute_node_options: Additional parameters for + the operation + :type file_get_from_compute_node_options: + ~azure.batch.models.FileGetFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. 
The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_compute_node_options is not None: + timeout = file_get_from_compute_node_options.timeout + client_request_id = None + if file_get_from_compute_node_options is not None: + client_request_id = file_get_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_from_compute_node_options is not None: + return_client_request_id = file_get_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_from_compute_node_options is not None: + ocp_date = file_get_from_compute_node_options.ocp_date + ocp_range = None + if file_get_from_compute_node_options is not None: + ocp_range = file_get_from_compute_node_options.ocp_range + if_modified_since = None + if file_get_from_compute_node_options is not None: + if_modified_since = file_get_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_from_compute_node_options is not None: + if_unmodified_since = file_get_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 
'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_properties_from_compute_node( + self, pool_id, node_id, file_path, file_get_properties_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the file. + :type node_id: str + :param file_path: The path to the Compute Node file that you want to + get the properties of. + :type file_path: str + :param file_get_properties_from_compute_node_options: Additional + parameters for the operation + :type file_get_properties_from_compute_node_options: + ~azure.batch.models.FileGetPropertiesFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_compute_node_options is not None: + timeout = file_get_properties_from_compute_node_options.timeout + client_request_id = None + if file_get_properties_from_compute_node_options is not None: + client_request_id = file_get_properties_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_properties_from_compute_node_options is not None: + return_client_request_id = file_get_properties_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_properties_from_compute_node_options is not None: + ocp_date = file_get_properties_from_compute_node_options.ocp_date + if_modified_since = None + if file_get_properties_from_compute_node_options is not None: + if_modified_since = file_get_properties_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_compute_node_options is not None: + if_unmodified_since = file_get_properties_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def list_from_task( + self, job_id, task_id, recursive=None, 
file_list_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the files in a Task's directory on its Compute Node. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose files you want to list. + :type task_id: str + :param recursive: Whether to list children of the Task directory. This + parameter can be used in combination with the filter parameter to list + specific type of files. + :type recursive: bool + :param file_list_from_task_options: Additional parameters for the + operation + :type file_list_from_task_options: + ~azure.batch.models.FileListFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_task_options is not None: + filter = file_list_from_task_options.filter + max_results = None + if file_list_from_task_options is not None: + max_results = file_list_from_task_options.max_results + timeout = None + if file_list_from_task_options is not None: + timeout = file_list_from_task_options.timeout + client_request_id = None + if file_list_from_task_options is not None: + client_request_id = file_list_from_task_options.client_request_id + return_client_request_id = None + if file_list_from_task_options is not None: + return_client_request_id = file_list_from_task_options.return_client_request_id + ocp_date = None + if file_list_from_task_options is not None: + ocp_date = file_list_from_task_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_task.metadata['url'] + path_format_arguments 
= { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files'} + + def list_from_compute_node( + self, pool_id, node_id, recursive=None, file_list_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the files in Task directories on the specified Compute + Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node whose files you want to + list. + :type node_id: str + :param recursive: Whether to list children of a directory. + :type recursive: bool + :param file_list_from_compute_node_options: Additional parameters for + the operation + :type file_list_from_compute_node_options: + ~azure.batch.models.FileListFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_compute_node_options is not None: + filter = file_list_from_compute_node_options.filter + max_results = None + if file_list_from_compute_node_options is not None: + max_results = file_list_from_compute_node_options.max_results + timeout = None + if file_list_from_compute_node_options is not None: + timeout = file_list_from_compute_node_options.timeout + client_request_id = None + if file_list_from_compute_node_options is not None: + client_request_id = file_list_from_compute_node_options.client_request_id + return_client_request_id = None + if file_list_from_compute_node_options is not None: + return_client_request_id = file_list_from_compute_node_options.return_client_request_id + ocp_date = None + if file_list_from_compute_node_options is not None: + ocp_date = file_list_from_compute_node_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', 
maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/job_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/job_operations.py new file mode 100644 index 00000000..692e3e80 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_06_01/operations/job_operations.py @@ -0,0 +1,1439 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobOperations(object): + """JobOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def get_all_lifetime_statistics( + self, job_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the Jobs in the specified + Account. + + Statistics are aggregated across all Jobs that have ever existed in the + Account, from Account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. 
+ + :param job_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type job_get_all_lifetime_statistics_options: + ~azure.batch.models.JobGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: JobStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.JobStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_all_lifetime_statistics_options is not None: + timeout = job_get_all_lifetime_statistics_options.timeout + client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + client_request_id = job_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + return_client_request_id = job_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if job_get_all_lifetime_statistics_options is not None: + ocp_date = job_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('JobStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimejobstats'} + + def delete( + self, job_id, job_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Job. + + Deleting a Job also deletes all Tasks that are part of that Job, and + all Job statistics. This also overrides the retention period for Task + data; that is, if the Job contains Tasks which are still retained on + Compute Nodes, the Batch services deletes those Tasks' working + directories and all their contents. 
When a Delete Job request is + received, the Batch service sets the Job to the deleting state. All + update operations on a Job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that + the Job is being deleted. + + :param job_id: The ID of the Job to delete. + :type job_id: str + :param job_delete_options: Additional parameters for the operation + :type job_delete_options: ~azure.batch.models.JobDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_delete_options is not None: + timeout = job_delete_options.timeout + client_request_id = None + if job_delete_options is not None: + client_request_id = job_delete_options.client_request_id + return_client_request_id = None + if job_delete_options is not None: + return_client_request_id = job_delete_options.return_client_request_id + ocp_date = None + if job_delete_options is not None: + ocp_date = job_delete_options.ocp_date + if_match = None + if job_delete_options is not None: + if_match = job_delete_options.if_match + if_none_match = None + if job_delete_options is not None: + if_none_match = job_delete_options.if_none_match + if_modified_since = None + if job_delete_options is not None: + if_modified_since = job_delete_options.if_modified_since + if_unmodified_since = None + if job_delete_options is not None: + if_unmodified_since = job_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", 
job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise 
models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobs/{jobId}'} + + def get( + self, job_id, job_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Job. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_get_options: Additional parameters for the operation + :type job_get_options: ~azure.batch.models.JobGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudJob or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJob or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_get_options is not None: + select = job_get_options.select + expand = None + if job_get_options is not None: + expand = job_get_options.expand + timeout = None + if job_get_options is not None: + timeout = job_get_options.timeout + client_request_id = None + if job_get_options is not None: + client_request_id = job_get_options.client_request_id + return_client_request_id = None + if job_get_options is not None: + return_client_request_id = job_get_options.return_client_request_id + ocp_date = None + if job_get_options is not None: + ocp_date = job_get_options.ocp_date + if_match = None + if job_get_options is not None: + if_match = job_get_options.if_match + if_none_match = None + if job_get_options is not None: + if_none_match = job_get_options.if_none_match + if_modified_since = None + if job_get_options is not None: + if_modified_since = job_get_options.if_modified_since + if_unmodified_since = None + if job_get_options 
is not None: + if_unmodified_since = job_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudJob', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}'} + + def patch( + self, job_id, job_patch_parameter, job_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job. + + This replaces only the Job properties specified in the request. For + example, if the Job has constraints, and a request does not specify the + constraints element, then the Job keeps the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. + :type job_id: str + :param job_patch_parameter: The parameters for the request. 
+ :type job_patch_parameter: ~azure.batch.models.JobPatchParameter + :param job_patch_options: Additional parameters for the operation + :type job_patch_options: ~azure.batch.models.JobPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_patch_options is not None: + timeout = job_patch_options.timeout + client_request_id = None + if job_patch_options is not None: + client_request_id = job_patch_options.client_request_id + return_client_request_id = None + if job_patch_options is not None: + return_client_request_id = job_patch_options.return_client_request_id + ocp_date = None + if job_patch_options is not None: + ocp_date = job_patch_options.ocp_date + if_match = None + if job_patch_options is not None: + if_match = job_patch_options.if_match + if_none_match = None + if job_patch_options is not None: + if_none_match = job_patch_options.if_none_match + if_modified_since = None + if job_patch_options is not None: + if_modified_since = job_patch_options.if_modified_since + if_unmodified_since = None + if job_patch_options is not None: + if_unmodified_since = job_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_patch_parameter, 'JobPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = 
ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/jobs/{jobId}'} + + def update( + self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job. + + This fully replaces all the updatable properties of the Job. For + example, if the Job has constraints associated with it and if + constraints is not specified with this request, then the Batch service + will remove the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. + :type job_id: str + :param job_update_parameter: The parameters for the request. + :type job_update_parameter: ~azure.batch.models.JobUpdateParameter + :param job_update_options: Additional parameters for the operation + :type job_update_options: ~azure.batch.models.JobUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_update_options is not None: + timeout = job_update_options.timeout + client_request_id = None + if job_update_options is not None: + client_request_id = job_update_options.client_request_id + return_client_request_id = None + if job_update_options is not None: + return_client_request_id = job_update_options.return_client_request_id + ocp_date = None + if job_update_options is not None: + ocp_date = job_update_options.ocp_date + if_match = None + if job_update_options is not None: + if_match = job_update_options.if_match + if_none_match = None + if job_update_options is not None: + if_none_match = job_update_options.if_none_match + if_modified_since = None + if job_update_options is not None: + if_modified_since = job_update_options.if_modified_since + if_unmodified_since = None + if job_update_options is not None: + if_unmodified_since = job_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: 
+ header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_update_parameter, 'JobUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}'} + + def disable( + self, job_id, disable_tasks, job_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables the 
specified Job, preventing new Tasks from running. + + The Batch Service immediately moves the Job to the disabling state. + Batch then uses the disableTasks parameter to determine what to do with + the currently running Tasks of the Job. The Job remains in the + disabling state until the disable operation is completed and all Tasks + have been dealt with according to the disableTasks option; the Job then + moves to the disabled state. No new Tasks are started under the Job + until it moves back to active state. If you try to disable a Job that + is in any state other than active, disabling, or disabled, the request + fails with status code 409. + + :param job_id: The ID of the Job to disable. + :type job_id: str + :param disable_tasks: What to do with active Tasks associated with the + Job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + :param job_disable_options: Additional parameters for the operation + :type job_disable_options: ~azure.batch.models.JobDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_disable_options is not None: + timeout = job_disable_options.timeout + client_request_id = None + if job_disable_options is not None: + client_request_id = job_disable_options.client_request_id + return_client_request_id = None + if job_disable_options is not None: + return_client_request_id = job_disable_options.return_client_request_id + ocp_date = None + if job_disable_options is not None: + ocp_date = job_disable_options.ocp_date + if_match = None + if job_disable_options is not None: + if_match = job_disable_options.if_match + if_none_match = None + if job_disable_options is not None: + if_none_match = job_disable_options.if_none_match + if_modified_since = None + if job_disable_options is not None: + if_modified_since = job_disable_options.if_modified_since + if_unmodified_since = None + if job_disable_options is not None: + if_unmodified_since = job_disable_options.if_unmodified_since + job_disable_parameter = models.JobDisableParameter(disable_tasks=disable_tasks) + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_disable_parameter, 'JobDisableParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobs/{jobId}/disable'} + + def enable( + self, job_id, 
job_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables the specified Job, allowing new Tasks to run. + + When you call this API, the Batch service sets a disabled Job to the + enabling state. After the this operation is completed, the Job moves to + the active state, and scheduling of new Tasks under the Job resumes. + The Batch service does not allow a Task to remain in the active state + for more than 180 days. Therefore, if you enable a Job containing + active Tasks which were added more than 180 days ago, those Tasks will + not run. + + :param job_id: The ID of the Job to enable. + :type job_id: str + :param job_enable_options: Additional parameters for the operation + :type job_enable_options: ~azure.batch.models.JobEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_enable_options is not None: + timeout = job_enable_options.timeout + client_request_id = None + if job_enable_options is not None: + client_request_id = job_enable_options.client_request_id + return_client_request_id = None + if job_enable_options is not None: + return_client_request_id = job_enable_options.return_client_request_id + ocp_date = None + if job_enable_options is not None: + ocp_date = job_enable_options.ocp_date + if_match = None + if job_enable_options is not None: + if_match = job_enable_options.if_match + if_none_match = None + if job_enable_options is not None: + if_none_match = job_enable_options.if_none_match + if_modified_since = None + if job_enable_options is not None: + if_modified_since = job_enable_options.if_modified_since + if_unmodified_since = None + if job_enable_options is not None: + if_unmodified_since = job_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobs/{jobId}/enable'} + + def terminate( + self, job_id, terminate_reason=None, job_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified Job, marking it as completed. + + When a Terminate Job request is received, the Batch service sets the + Job to the terminating state. 
The Batch service then terminates any + running Tasks associated with the Job and runs any required Job release + Tasks. Then the Job moves into the completed state. If there are any + Tasks in the Job in the active state, they will remain in the active + state. Once a Job is terminated, new Tasks cannot be added and any + remaining active Tasks will not be scheduled. + + :param job_id: The ID of the Job to terminate. + :type job_id: str + :param terminate_reason: The text you want to appear as the Job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + :param job_terminate_options: Additional parameters for the operation + :type job_terminate_options: ~azure.batch.models.JobTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_terminate_options is not None: + timeout = job_terminate_options.timeout + client_request_id = None + if job_terminate_options is not None: + client_request_id = job_terminate_options.client_request_id + return_client_request_id = None + if job_terminate_options is not None: + return_client_request_id = job_terminate_options.return_client_request_id + ocp_date = None + if job_terminate_options is not None: + ocp_date = job_terminate_options.ocp_date + if_match = None + if job_terminate_options is not None: + if_match = job_terminate_options.if_match + if_none_match = None + if job_terminate_options is not None: + if_none_match = job_terminate_options.if_none_match + if_modified_since = None + if job_terminate_options is not None: + if_modified_since = job_terminate_options.if_modified_since + if_unmodified_since = None + if job_terminate_options is 
not None: + if_unmodified_since = job_terminate_options.if_unmodified_since + job_terminate_parameter = None + if terminate_reason is not None: + job_terminate_parameter = models.JobTerminateParameter(terminate_reason=terminate_reason) + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + if job_terminate_parameter is not None: + body_content = self._serialize.body(job_terminate_parameter, 'JobTerminateParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/terminate'} + + def add( + self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Job to the specified Account. + + The Batch service supports two ways to control the work done as part of + a Job. In the first approach, the user specifies a Job Manager Task. + The Batch service launches this Task when it is ready to start the Job. + The Job Manager Task controls all other Tasks that run under this Job, + by using the Task APIs. In the second approach, the user directly + controls the execution of Tasks under an active Job, by using the Task + APIs. Also note: when naming Jobs, avoid including sensitive + information such as user names or secret project names. This + information may appear in telemetry logs accessible to Microsoft + Support engineers. + + :param job: The Job to be added. 
+ :type job: ~azure.batch.models.JobAddParameter + :param job_add_options: Additional parameters for the operation + :type job_add_options: ~azure.batch.models.JobAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_add_options is not None: + timeout = job_add_options.timeout + client_request_id = None + if job_add_options is not None: + client_request_id = job_add_options.client_request_id + return_client_request_id = None + if job_add_options is not None: + return_client_request_id = job_add_options.return_client_request_id + ocp_date = None + if job_add_options is not None: + ocp_date = job_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job, 'JobAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs'} + + def list( + self, job_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Jobs in the specified Account. + + :param job_list_options: Additional parameters for the operation + :type job_list_options: ~azure.batch.models.JobListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_options is not None: + filter = job_list_options.filter + select = None + if job_list_options is not None: + select = job_list_options.select + expand = None + if job_list_options is not None: + expand = job_list_options.expand + max_results = None + if job_list_options is not None: + max_results = job_list_options.max_results + timeout = None + if job_list_options is not None: + timeout = job_list_options.timeout + client_request_id = None + if job_list_options is not None: + client_request_id = job_list_options.client_request_id + return_client_request_id = None + if job_list_options is not None: + return_client_request_id = job_list_options.return_client_request_id + ocp_date = None + if job_list_options is not None: + ocp_date = job_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs'} + + def list_from_job_schedule( + self, job_schedule_id, job_list_from_job_schedule_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the Jobs that have been created under the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule from which you want + to get a list of Jobs. 
+ :type job_schedule_id: str + :param job_list_from_job_schedule_options: Additional parameters for + the operation + :type job_list_from_job_schedule_options: + ~azure.batch.models.JobListFromJobScheduleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_from_job_schedule_options is not None: + filter = job_list_from_job_schedule_options.filter + select = None + if job_list_from_job_schedule_options is not None: + select = job_list_from_job_schedule_options.select + expand = None + if job_list_from_job_schedule_options is not None: + expand = job_list_from_job_schedule_options.expand + max_results = None + if job_list_from_job_schedule_options is not None: + max_results = job_list_from_job_schedule_options.max_results + timeout = None + if job_list_from_job_schedule_options is not None: + timeout = job_list_from_job_schedule_options.timeout + client_request_id = None + if job_list_from_job_schedule_options is not None: + client_request_id = job_list_from_job_schedule_options.client_request_id + return_client_request_id = None + if job_list_from_job_schedule_options is not None: + return_client_request_id = job_list_from_job_schedule_options.return_client_request_id + ocp_date = None + if job_list_from_job_schedule_options is not None: + ocp_date = job_list_from_job_schedule_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_job_schedule.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_job_schedule.metadata = {'url': '/jobschedules/{jobScheduleId}/jobs'} + + def list_preparation_and_release_task_status( + self, job_id, job_list_preparation_and_release_task_status_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the execution status of the Job Preparation and Job Release Task + for the specified Job across the Compute Nodes where the Job has run. + + This API returns the Job Preparation and Job Release Task status on all + Compute Nodes that have run the Job Preparation or Job Release Task. + This includes Compute Nodes which have since been removed from the + Pool. If this API is invoked on a Job which has no Job Preparation or + Job Release Task, the Batch service returns HTTP status code 409 + (Conflict) with an error code of JobPreparationTaskNotSpecified. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_list_preparation_and_release_task_status_options: + Additional parameters for the operation + :type job_list_preparation_and_release_task_status_options: + ~azure.batch.models.JobListPreparationAndReleaseTaskStatusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of + JobPreparationAndReleaseTaskExecutionInformation + :rtype: + ~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformationPaged[~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_preparation_and_release_task_status_options is not None: + filter = job_list_preparation_and_release_task_status_options.filter + select = None + if job_list_preparation_and_release_task_status_options is not None: + select = job_list_preparation_and_release_task_status_options.select + max_results = None + if job_list_preparation_and_release_task_status_options is not None: + max_results = job_list_preparation_and_release_task_status_options.max_results + timeout = None + if job_list_preparation_and_release_task_status_options is not None: + timeout = job_list_preparation_and_release_task_status_options.timeout + client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + client_request_id = job_list_preparation_and_release_task_status_options.client_request_id + return_client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + return_client_request_id = job_list_preparation_and_release_task_status_options.return_client_request_id + ocp_date = None + if job_list_preparation_and_release_task_status_options is not None: + ocp_date = job_list_preparation_and_release_task_status_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_preparation_and_release_task_status.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + 
client_raw_response = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_preparation_and_release_task_status.metadata = {'url': '/jobs/{jobId}/jobpreparationandreleasetaskstatus'} + + def get_task_counts( + self, job_id, job_get_task_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the Task counts for the specified Job. + + Task counts provide a count of the Tasks by active, running or + completed Task state, and a count of Tasks which succeeded or failed. + Tasks in the preparing state are counted as running. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_get_task_counts_options: Additional parameters for the + operation + :type job_get_task_counts_options: + ~azure.batch.models.JobGetTaskCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskCounts or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskCounts or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_task_counts_options is not None: + timeout = job_get_task_counts_options.timeout + client_request_id = None + if job_get_task_counts_options is not None: + client_request_id = job_get_task_counts_options.client_request_id + return_client_request_id = None + if job_get_task_counts_options is not None: + return_client_request_id = job_get_task_counts_options.return_client_request_id + ocp_date = None + if job_get_task_counts_options is not None: + ocp_date = job_get_task_counts_options.ocp_date + + # Construct URL + url = self.get_task_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = 
self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskCounts', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_task_counts.metadata = {'url': '/jobs/{jobId}/taskcounts'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/job_schedule_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/job_schedule_operations.py new file mode 100644 index 00000000..eacd6f00 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/job_schedule_operations.py @@ -0,0 +1,1093 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobScheduleOperations(object): + """JobScheduleOperations operations. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def exists( + self, job_schedule_id, job_schedule_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Checks the specified Job Schedule exists. + + :param job_schedule_id: The ID of the Job Schedule which you want to + check. + :type job_schedule_id: str + :param job_schedule_exists_options: Additional parameters for the + operation + :type job_schedule_exists_options: + ~azure.batch.models.JobScheduleExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_exists_options is not None: + timeout = job_schedule_exists_options.timeout + client_request_id = None + if job_schedule_exists_options is not None: + client_request_id = job_schedule_exists_options.client_request_id + return_client_request_id = None + if job_schedule_exists_options is not None: + return_client_request_id = job_schedule_exists_options.return_client_request_id + ocp_date = None + if job_schedule_exists_options is not None: + ocp_date = job_schedule_exists_options.ocp_date + if_match = None + if job_schedule_exists_options is not None: + if_match = job_schedule_exists_options.if_match + if_none_match = None + if job_schedule_exists_options is not None: + if_none_match = job_schedule_exists_options.if_none_match + if_modified_since = None + if job_schedule_exists_options is not None: + if_modified_since = job_schedule_exists_options.if_modified_since + if_unmodified_since = None + if job_schedule_exists_options is not None: + if_unmodified_since = job_schedule_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def delete( + self, job_schedule_id, job_schedule_delete_options=None, custom_headers=None, 
raw=False, **operation_config): + """Deletes a Job Schedule from the specified Account. + + When you delete a Job Schedule, this also deletes all Jobs and Tasks + under that schedule. When Tasks are deleted, all the files in their + working directories on the Compute Nodes are also deleted (the + retention period is ignored). The Job Schedule statistics are no longer + accessible once the Job Schedule is deleted, though they are still + counted towards Account lifetime statistics. + + :param job_schedule_id: The ID of the Job Schedule to delete. + :type job_schedule_id: str + :param job_schedule_delete_options: Additional parameters for the + operation + :type job_schedule_delete_options: + ~azure.batch.models.JobScheduleDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_delete_options is not None: + timeout = job_schedule_delete_options.timeout + client_request_id = None + if job_schedule_delete_options is not None: + client_request_id = job_schedule_delete_options.client_request_id + return_client_request_id = None + if job_schedule_delete_options is not None: + return_client_request_id = job_schedule_delete_options.return_client_request_id + ocp_date = None + if job_schedule_delete_options is not None: + ocp_date = job_schedule_delete_options.ocp_date + if_match = None + if job_schedule_delete_options is not None: + if_match = job_schedule_delete_options.if_match + if_none_match = None + if job_schedule_delete_options is not None: + if_none_match = job_schedule_delete_options.if_none_match + if_modified_since = None + if job_schedule_delete_options is not None: + if_modified_since = 
job_schedule_delete_options.if_modified_since + if_unmodified_since = None + if job_schedule_delete_options is not None: + if_unmodified_since = job_schedule_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def get( + self, job_schedule_id, job_schedule_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to get. + :type job_schedule_id: str + :param job_schedule_get_options: Additional parameters for the + operation + :type job_schedule_get_options: + ~azure.batch.models.JobScheduleGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudJobSchedule or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJobSchedule or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_schedule_get_options is not None: + select = job_schedule_get_options.select + expand = None + if job_schedule_get_options is not None: + expand = job_schedule_get_options.expand + timeout = None + if job_schedule_get_options is not None: + timeout = job_schedule_get_options.timeout + client_request_id = None + if job_schedule_get_options is not None: + client_request_id = job_schedule_get_options.client_request_id + return_client_request_id = None + if job_schedule_get_options is not None: + return_client_request_id = job_schedule_get_options.return_client_request_id + ocp_date = None + if job_schedule_get_options is not None: + ocp_date = job_schedule_get_options.ocp_date + if_match = None + if job_schedule_get_options is not None: + if_match = job_schedule_get_options.if_match + if_none_match = None + if job_schedule_get_options is not None: + if_none_match = job_schedule_get_options.if_none_match + if_modified_since = None + if job_schedule_get_options is not None: + if_modified_since = job_schedule_get_options.if_modified_since + if_unmodified_since = None + if job_schedule_get_options is not None: + if_unmodified_since = job_schedule_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 
'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} 
+ + if response.status_code == 200: + deserialized = self._deserialize('CloudJobSchedule', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def patch( + self, job_schedule_id, job_schedule_patch_parameter, job_schedule_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job Schedule. + + This replaces only the Job Schedule properties specified in the + request. For example, if the schedule property is not specified with + this request, then the Batch service will keep the existing schedule. + Changes to a Job Schedule only impact Jobs created by the schedule + after the update has taken place; currently running Jobs are + unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. + :type job_schedule_id: str + :param job_schedule_patch_parameter: The parameters for the request. + :type job_schedule_patch_parameter: + ~azure.batch.models.JobSchedulePatchParameter + :param job_schedule_patch_options: Additional parameters for the + operation + :type job_schedule_patch_options: + ~azure.batch.models.JobSchedulePatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_patch_options is not None: + timeout = job_schedule_patch_options.timeout + client_request_id = None + if job_schedule_patch_options is not None: + client_request_id = job_schedule_patch_options.client_request_id + return_client_request_id = None + if job_schedule_patch_options is not None: + return_client_request_id = job_schedule_patch_options.return_client_request_id + ocp_date = None + if job_schedule_patch_options is not None: + ocp_date = job_schedule_patch_options.ocp_date + if_match = None + if job_schedule_patch_options is not None: + if_match = job_schedule_patch_options.if_match + if_none_match = None + if job_schedule_patch_options is not None: + if_none_match = job_schedule_patch_options.if_none_match + if_modified_since = None + if job_schedule_patch_options is not None: + if_modified_since = job_schedule_patch_options.if_modified_since + if_unmodified_since = None + if job_schedule_patch_options is not None: + if_unmodified_since = job_schedule_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_patch_parameter, 'JobSchedulePatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
patch.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def update( + self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job Schedule. + + This fully replaces all the updatable properties of the Job Schedule. + For example, if the schedule property is not specified with this + request, then the Batch service will remove the existing schedule. + Changes to a Job Schedule only impact Jobs created by the schedule + after the update has taken place; currently running Jobs are + unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. + :type job_schedule_id: str + :param job_schedule_update_parameter: The parameters for the request. + :type job_schedule_update_parameter: + ~azure.batch.models.JobScheduleUpdateParameter + :param job_schedule_update_options: Additional parameters for the + operation + :type job_schedule_update_options: + ~azure.batch.models.JobScheduleUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_update_options is not None: + timeout = job_schedule_update_options.timeout + client_request_id = None + if job_schedule_update_options is not None: + client_request_id = job_schedule_update_options.client_request_id + return_client_request_id = None + if job_schedule_update_options is not None: + return_client_request_id = job_schedule_update_options.return_client_request_id + ocp_date = None + if job_schedule_update_options is not None: + ocp_date = job_schedule_update_options.ocp_date + if_match = None + if job_schedule_update_options is not None: + if_match = job_schedule_update_options.if_match + if_none_match = None + if job_schedule_update_options is not None: + if_none_match = job_schedule_update_options.if_none_match + if_modified_since = None + if job_schedule_update_options is not None: + if_modified_since = job_schedule_update_options.if_modified_since + if_unmodified_since = None + if job_schedule_update_options is not None: + if_unmodified_since = job_schedule_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_update_parameter, 'JobScheduleUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
update.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def disable( + self, job_schedule_id, job_schedule_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables a Job Schedule. + + No new Jobs will be created until the Job Schedule is enabled again. + + :param job_schedule_id: The ID of the Job Schedule to disable. + :type job_schedule_id: str + :param job_schedule_disable_options: Additional parameters for the + operation + :type job_schedule_disable_options: + ~azure.batch.models.JobScheduleDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_disable_options is not None: + timeout = job_schedule_disable_options.timeout + client_request_id = None + if job_schedule_disable_options is not None: + client_request_id = job_schedule_disable_options.client_request_id + return_client_request_id = None + if job_schedule_disable_options is not None: + return_client_request_id = job_schedule_disable_options.return_client_request_id + ocp_date = None + if job_schedule_disable_options is not None: + ocp_date = job_schedule_disable_options.ocp_date + if_match = None + if job_schedule_disable_options is not None: + if_match = job_schedule_disable_options.if_match + if_none_match = None + if job_schedule_disable_options is not None: + if_none_match = job_schedule_disable_options.if_none_match + if_modified_since = None + if job_schedule_disable_options is not None: + if_modified_since = job_schedule_disable_options.if_modified_since + if_unmodified_since = None + if job_schedule_disable_options is not None: + if_unmodified_since = 
job_schedule_disable_options.if_unmodified_since + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobschedules/{jobScheduleId}/disable'} + + def enable( + self, job_schedule_id, job_schedule_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to enable. + :type job_schedule_id: str + :param job_schedule_enable_options: Additional parameters for the + operation + :type job_schedule_enable_options: + ~azure.batch.models.JobScheduleEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_enable_options is not None: + timeout = job_schedule_enable_options.timeout + client_request_id = None + if job_schedule_enable_options is not None: + client_request_id = job_schedule_enable_options.client_request_id + return_client_request_id = None + if job_schedule_enable_options is not None: + return_client_request_id = job_schedule_enable_options.return_client_request_id + ocp_date = None + if job_schedule_enable_options is not None: + ocp_date = job_schedule_enable_options.ocp_date + if_match = None + if job_schedule_enable_options is not None: + if_match = job_schedule_enable_options.if_match + if_none_match = None + if job_schedule_enable_options is not None: + if_none_match = job_schedule_enable_options.if_none_match + if_modified_since = None + if job_schedule_enable_options is not None: + if_modified_since = job_schedule_enable_options.if_modified_since + if_unmodified_since = None + if job_schedule_enable_options is not None: + if_unmodified_since = job_schedule_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobschedules/{jobScheduleId}/enable'} + + def terminate( + self, job_schedule_id, job_schedule_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates a 
Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to terminates. + :type job_schedule_id: str + :param job_schedule_terminate_options: Additional parameters for the + operation + :type job_schedule_terminate_options: + ~azure.batch.models.JobScheduleTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_terminate_options is not None: + timeout = job_schedule_terminate_options.timeout + client_request_id = None + if job_schedule_terminate_options is not None: + client_request_id = job_schedule_terminate_options.client_request_id + return_client_request_id = None + if job_schedule_terminate_options is not None: + return_client_request_id = job_schedule_terminate_options.return_client_request_id + ocp_date = None + if job_schedule_terminate_options is not None: + ocp_date = job_schedule_terminate_options.ocp_date + if_match = None + if job_schedule_terminate_options is not None: + if_match = job_schedule_terminate_options.if_match + if_none_match = None + if job_schedule_terminate_options is not None: + if_none_match = job_schedule_terminate_options.if_none_match + if_modified_since = None + if job_schedule_terminate_options is not None: + if_modified_since = job_schedule_terminate_options.if_modified_since + if_unmodified_since = None + if job_schedule_terminate_options is not None: + if_unmodified_since = job_schedule_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobschedules/{jobScheduleId}/terminate'} + + def add( + self, cloud_job_schedule, job_schedule_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Job Schedule to the specified Account. + + :param cloud_job_schedule: The Job Schedule to be added. + :type cloud_job_schedule: ~azure.batch.models.JobScheduleAddParameter + :param job_schedule_add_options: Additional parameters for the + operation + :type job_schedule_add_options: + ~azure.batch.models.JobScheduleAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_add_options is not None: + timeout = job_schedule_add_options.timeout + client_request_id = None + if job_schedule_add_options is not None: + client_request_id = job_schedule_add_options.client_request_id + return_client_request_id = None + if job_schedule_add_options is not None: + return_client_request_id = job_schedule_add_options.return_client_request_id + ocp_date = None + if job_schedule_add_options is not None: + ocp_date = job_schedule_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date 
is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(cloud_job_schedule, 'JobScheduleAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobschedules'} + + def list( + self, job_schedule_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Job Schedules in the specified Account. + + :param job_schedule_list_options: Additional parameters for the + operation + :type job_schedule_list_options: + ~azure.batch.models.JobScheduleListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJobSchedule + :rtype: + ~azure.batch.models.CloudJobSchedulePaged[~azure.batch.models.CloudJobSchedule] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_schedule_list_options is not None: + filter = job_schedule_list_options.filter + select = None + if job_schedule_list_options is not None: + select = job_schedule_list_options.select + expand = None + if job_schedule_list_options is not None: + expand = job_schedule_list_options.expand + max_results = None + if job_schedule_list_options is not None: + max_results = job_schedule_list_options.max_results + timeout = None + if job_schedule_list_options is not None: + timeout = job_schedule_list_options.timeout + client_request_id = None + if job_schedule_list_options is not None: + client_request_id = job_schedule_list_options.client_request_id + return_client_request_id = None + if job_schedule_list_options is not None: + return_client_request_id = job_schedule_list_options.return_client_request_id + ocp_date = None + if job_schedule_list_options is not None: + ocp_date = job_schedule_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = 
self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobschedules'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/pool_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/pool_operations.py new file mode 100644 index 00000000..b9b3860e 
--- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/pool_operations.py @@ -0,0 +1,1635 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class PoolOperations(object): + """PoolOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def list_usage_metrics( + self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the usage metrics, aggregated by Pool across individual time + intervals, for the specified Account. + + If you do not specify a $filter clause including a poolId, the response + includes all Pools that existed in the Account in the time range of the + returned aggregation intervals. If you do not specify a $filter clause + including a startTime or endTime these filters default to the start and + end times of the last aggregation interval currently available; that + is, only the last aggregation interval is returned. 
+ + :param pool_list_usage_metrics_options: Additional parameters for the + operation + :type pool_list_usage_metrics_options: + ~azure.batch.models.PoolListUsageMetricsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of PoolUsageMetrics + :rtype: + ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics] + :raises: + :class:`BatchErrorException` + """ + start_time = None + if pool_list_usage_metrics_options is not None: + start_time = pool_list_usage_metrics_options.start_time + end_time = None + if pool_list_usage_metrics_options is not None: + end_time = pool_list_usage_metrics_options.end_time + filter = None + if pool_list_usage_metrics_options is not None: + filter = pool_list_usage_metrics_options.filter + max_results = None + if pool_list_usage_metrics_options is not None: + max_results = pool_list_usage_metrics_options.max_results + timeout = None + if pool_list_usage_metrics_options is not None: + timeout = pool_list_usage_metrics_options.timeout + client_request_id = None + if pool_list_usage_metrics_options is not None: + client_request_id = pool_list_usage_metrics_options.client_request_id + return_client_request_id = None + if pool_list_usage_metrics_options is not None: + return_client_request_id = pool_list_usage_metrics_options.return_client_request_id + ocp_date = None + if pool_list_usage_metrics_options is not None: + ocp_date = pool_list_usage_metrics_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_usage_metrics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) 
+ + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if start_time is not None: + query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601') + if end_time is not None: + query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + 
+ # Deserialize response + deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_usage_metrics.metadata = {'url': '/poolusagemetrics'} + + def get_all_lifetime_statistics( + self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the Pools in the specified + Account. + + Statistics are aggregated across all Pools that have ever existed in + the Account, from Account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + + :param pool_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type pool_get_all_lifetime_statistics_options: + ~azure.batch.models.PoolGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PoolStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.PoolStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_get_all_lifetime_statistics_options is not None: + timeout = pool_get_all_lifetime_statistics_options.timeout + client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + client_request_id = pool_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if pool_get_all_lifetime_statistics_options is not None: + ocp_date = pool_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not 
None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('PoolStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'} + + def add( + self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Pool to the specified Account. + + When naming Pools, avoid including sensitive information such as user + names or secret project names. This information may appear in telemetry + logs accessible to Microsoft Support engineers. + + :param pool: The Pool to be added. + :type pool: ~azure.batch.models.PoolAddParameter + :param pool_add_options: Additional parameters for the operation + :type pool_add_options: ~azure.batch.models.PoolAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_add_options is not None: + timeout = pool_add_options.timeout + client_request_id = None + if pool_add_options is not None: + client_request_id = pool_add_options.client_request_id + return_client_request_id = None + if pool_add_options is not None: + return_client_request_id = pool_add_options.return_client_request_id + ocp_date = None + if pool_add_options is not None: + ocp_date = pool_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool, 'PoolAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/pools'} + + def list( + self, pool_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Pools in the specified Account. + + :param pool_list_options: Additional parameters for the operation + :type pool_list_options: ~azure.batch.models.PoolListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudPool + :rtype: + ~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool] + :raises: + :class:`BatchErrorException` + """ + filter = None + if pool_list_options is not None: + filter = pool_list_options.filter + select = None + if pool_list_options is not None: + select = pool_list_options.select + expand = None + if pool_list_options is not None: + expand = pool_list_options.expand + max_results = None + if pool_list_options is not None: + max_results = pool_list_options.max_results + timeout = None + if pool_list_options is not None: + timeout = pool_list_options.timeout + client_request_id = None + if pool_list_options is not None: + client_request_id = pool_list_options.client_request_id + return_client_request_id = None + if pool_list_options is not None: + return_client_request_id = pool_list_options.return_client_request_id + ocp_date = None + if pool_list_options is not None: + ocp_date = pool_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools'} + + def delete( + self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Pool from the specified Account. 
+ + When you request that a Pool be deleted, the following actions occur: + the Pool state is set to deleting; any ongoing resize operation on the + Pool is stopped; the Batch service starts resizing the Pool to zero + Compute Nodes; any Tasks running on existing Compute Nodes are + terminated and requeued (as if a resize Pool operation had been + requested with the default requeue option); finally, the Pool is + removed from the system. Because running Tasks are requeued, the user + can rerun these Tasks by updating their Job to target a different Pool. + The Tasks can then run on the new Pool. If you want to override the + requeue behavior, then you should call resize Pool explicitly to shrink + the Pool to zero size before deleting the Pool. If you call an Update, + Patch or Delete API on a Pool in the deleting state, it will fail with + HTTP status code 409 with error code PoolBeingDeleted. + + :param pool_id: The ID of the Pool to delete. + :type pool_id: str + :param pool_delete_options: Additional parameters for the operation + :type pool_delete_options: ~azure.batch.models.PoolDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`.
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_delete_options is not None: + timeout = pool_delete_options.timeout + client_request_id = None + if pool_delete_options is not None: + client_request_id = pool_delete_options.client_request_id + return_client_request_id = None + if pool_delete_options is not None: + return_client_request_id = pool_delete_options.return_client_request_id + ocp_date = None + if pool_delete_options is not None: + ocp_date = pool_delete_options.ocp_date + if_match = None + if pool_delete_options is not None: + if_match = pool_delete_options.if_match + if_none_match = None + if pool_delete_options is not None: + if_none_match = pool_delete_options.if_none_match + if_modified_since = None + if pool_delete_options is not None: + if_modified_since = pool_delete_options.if_modified_since + if_unmodified_since = None + if pool_delete_options is not None: + if_unmodified_since = pool_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/pools/{poolId}'} + + def exists( + self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Gets basic properties of a Pool. + + :param pool_id: The ID of the Pool to get. 
+ :type pool_id: str + :param pool_exists_options: Additional parameters for the operation + :type pool_exists_options: ~azure.batch.models.PoolExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_exists_options is not None: + timeout = pool_exists_options.timeout + client_request_id = None + if pool_exists_options is not None: + client_request_id = pool_exists_options.client_request_id + return_client_request_id = None + if pool_exists_options is not None: + return_client_request_id = pool_exists_options.return_client_request_id + ocp_date = None + if pool_exists_options is not None: + ocp_date = pool_exists_options.ocp_date + if_match = None + if pool_exists_options is not None: + if_match = pool_exists_options.if_match + if_none_match = None + if pool_exists_options is not None: + if_none_match = pool_exists_options.if_none_match + if_modified_since = None + if pool_exists_options is not None: + if_modified_since = pool_exists_options.if_modified_since + if_unmodified_since = None + if pool_exists_options is not None: + if_unmodified_since = pool_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 
'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/pools/{poolId}'} + + def get( + self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Pool. + + :param pool_id: The ID of the Pool to get. + :type pool_id: str + :param pool_get_options: Additional parameters for the operation + :type pool_get_options: ~azure.batch.models.PoolGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudPool or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudPool or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if pool_get_options is not None: + select = pool_get_options.select + expand = None + if pool_get_options is not None: + expand = pool_get_options.expand + timeout = None + if pool_get_options is not None: + timeout = pool_get_options.timeout + client_request_id = None + if pool_get_options is not None: + client_request_id = pool_get_options.client_request_id + return_client_request_id = None + if pool_get_options is not None: + return_client_request_id = pool_get_options.return_client_request_id + ocp_date = None + if pool_get_options is not None: + ocp_date = pool_get_options.ocp_date + if_match = None + if pool_get_options is not None: + if_match = pool_get_options.if_match + if_none_match = None + if pool_get_options is not None: + if_none_match = pool_get_options.if_none_match + if_modified_since = None + if pool_get_options is not None: + if_modified_since = pool_get_options.if_modified_since + if_unmodified_since = None + if pool_get_options is not None: + if_unmodified_since = pool_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudPool', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}'} + + def patch( + self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Pool. + + This only replaces the Pool properties specified in the request. For + example, if the Pool has a start Task associated with it, and a request + does not specify a start Task element, then the Pool keeps the existing + start Task. + + :param pool_id: The ID of the Pool to update. + :type pool_id: str + :param pool_patch_parameter: The parameters for the request. + :type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter + :param pool_patch_options: Additional parameters for the operation + :type pool_patch_options: ~azure.batch.models.PoolPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_patch_options is not None: + timeout = pool_patch_options.timeout + client_request_id = None + if pool_patch_options is not None: + client_request_id = pool_patch_options.client_request_id + return_client_request_id = None + if pool_patch_options is not None: + return_client_request_id = pool_patch_options.return_client_request_id + ocp_date = None + if pool_patch_options is not None: + ocp_date = pool_patch_options.ocp_date + if_match = None + if pool_patch_options is not None: + if_match = pool_patch_options.if_match + if_none_match = None + if pool_patch_options is not None: + if_none_match = pool_patch_options.if_none_match + if_modified_since = None + if pool_patch_options is not None: + if_modified_since = pool_patch_options.if_modified_since + if_unmodified_since = None + if pool_patch_options is not None: + if_unmodified_since = pool_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not 
None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/pools/{poolId}'} + + def disable_auto_scale( + self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + 
"""Disables automatic scaling for a Pool. + + :param pool_id: The ID of the Pool on which to disable automatic + scaling. + :type pool_id: str + :param pool_disable_auto_scale_options: Additional parameters for the + operation + :type pool_disable_auto_scale_options: + ~azure.batch.models.PoolDisableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_disable_auto_scale_options is not None: + timeout = pool_disable_auto_scale_options.timeout + client_request_id = None + if pool_disable_auto_scale_options is not None: + client_request_id = pool_disable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_disable_auto_scale_options is not None: + return_client_request_id = pool_disable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_disable_auto_scale_options is not None: + ocp_date = pool_disable_auto_scale_options.ocp_date + + # Construct URL + url = self.disable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) 
+ if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'} + + def enable_auto_scale( + self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Enables automatic scaling for a Pool. + + You cannot enable automatic scaling on a Pool if a resize operation is + in progress on the Pool. If automatic scaling of the Pool is currently + disabled, you must specify a valid autoscale formula as part of the + request. If automatic scaling of the Pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You + cannot call this API for the same Pool more than once every 30 seconds. 
+ + :param pool_id: The ID of the Pool on which to enable automatic + scaling. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + Compute Nodes in the Pool. The formula is checked for validity before + it is applied to the Pool. If the formula is not valid, the Batch + service rejects the request with detailed error information. For more + information about specifying this formula, see Automatically scale + Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. + The default value is 15 minutes. The minimum and maximum value are 5 + minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the + request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). If you + specify a new interval, then the existing autoscale evaluation + schedule will be stopped and a new autoscale evaluation schedule will + be started, with its starting time being the time when this request + was issued. + :type auto_scale_evaluation_interval: timedelta + :param pool_enable_auto_scale_options: Additional parameters for the + operation + :type pool_enable_auto_scale_options: + ~azure.batch.models.PoolEnableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_enable_auto_scale_options is not None: + timeout = pool_enable_auto_scale_options.timeout + client_request_id = None + if pool_enable_auto_scale_options is not None: + client_request_id = pool_enable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_enable_auto_scale_options is not None: + return_client_request_id = pool_enable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_enable_auto_scale_options is not None: + ocp_date = pool_enable_auto_scale_options.ocp_date + if_match = None + if pool_enable_auto_scale_options is not None: + if_match = pool_enable_auto_scale_options.if_match + if_none_match = None + if pool_enable_auto_scale_options is not None: + if_none_match = pool_enable_auto_scale_options.if_none_match + if_modified_since = None + if pool_enable_auto_scale_options is not None: + if_modified_since = pool_enable_auto_scale_options.if_modified_since + if_unmodified_since = None + if pool_enable_auto_scale_options is not None: + if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since + pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval) + + # Construct URL + url = self.enable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'} + + def evaluate_auto_scale( + self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the result of evaluating an automatic scaling formula on the Pool. + + This API is primarily for validating an autoscale formula, as it simply + returns the result without applying the formula to the Pool. The Pool + must have auto scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the Pool on which to evaluate the automatic + scaling formula. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to + the Pool, 'Enable automatic scaling on a Pool'. For more information + about specifying this formula, see Automatically scale Compute Nodes + in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param pool_evaluate_auto_scale_options: Additional parameters for the + operation + :type pool_evaluate_auto_scale_options: + ~azure.batch.models.PoolEvaluateAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: AutoScaleRun or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.AutoScaleRun or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_evaluate_auto_scale_options is not None: + timeout = pool_evaluate_auto_scale_options.timeout + client_request_id = None + if pool_evaluate_auto_scale_options is not None: + client_request_id = pool_evaluate_auto_scale_options.client_request_id + return_client_request_id = None + if pool_evaluate_auto_scale_options is not None: + return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id + ocp_date = None + if pool_evaluate_auto_scale_options is not None: + ocp_date = pool_evaluate_auto_scale_options.ocp_date + pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula) + + # Construct URL + url = self.evaluate_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 
'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('AutoScaleRun', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'} + + def resize( + self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Changes the number of Compute Nodes that are assigned to a Pool. + + You can only resize a Pool when its allocation state is steady. If the + Pool is already resizing, the request fails with status code 409. When + you resize a Pool, the Pool's allocation state changes from steady to + resizing. You cannot resize Pools which are configured for automatic + scaling. If you try to do this, the Batch service returns an error 409. 
+ If you resize a Pool downwards, the Batch service chooses which Compute + Nodes to remove. To remove specific Compute Nodes, use the Pool remove + Compute Nodes API instead. + + :param pool_id: The ID of the Pool to resize. + :type pool_id: str + :param pool_resize_parameter: The parameters for the request. + :type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter + :param pool_resize_options: Additional parameters for the operation + :type pool_resize_options: ~azure.batch.models.PoolResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_resize_options is not None: + timeout = pool_resize_options.timeout + client_request_id = None + if pool_resize_options is not None: + client_request_id = pool_resize_options.client_request_id + return_client_request_id = None + if pool_resize_options is not None: + return_client_request_id = pool_resize_options.return_client_request_id + ocp_date = None + if pool_resize_options is not None: + ocp_date = pool_resize_options.ocp_date + if_match = None + if pool_resize_options is not None: + if_match = pool_resize_options.if_match + if_none_match = None + if pool_resize_options is not None: + if_none_match = pool_resize_options.if_none_match + if_modified_since = None + if pool_resize_options is not None: + if_modified_since = pool_resize_options.if_modified_since + if_unmodified_since = None + if pool_resize_options is not None: + if_unmodified_since = pool_resize_options.if_unmodified_since + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 
'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_resize_parameter, 
'PoolResizeParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + resize.metadata = {'url': '/pools/{poolId}/resize'} + + def stop_resize( + self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Stops an ongoing resize operation on the Pool. + + This does not restore the Pool to its previous state before the resize + operation: it only stops any further changes being made, and the Pool + maintains its current state. After stopping, the Pool stabilizes at the + number of Compute Nodes it was at when the stop operation was done. + During the stop operation, the Pool allocation state changes first to + stopping and then to steady. A resize operation need not be an explicit + resize Pool request; this API can also be used to halt the initial + sizing of the Pool when it is created. + + :param pool_id: The ID of the Pool whose resizing you want to stop. + :type pool_id: str + :param pool_stop_resize_options: Additional parameters for the + operation + :type pool_stop_resize_options: + ~azure.batch.models.PoolStopResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_stop_resize_options is not None: + timeout = pool_stop_resize_options.timeout + client_request_id = None + if pool_stop_resize_options is not None: + client_request_id = pool_stop_resize_options.client_request_id + return_client_request_id = None + if pool_stop_resize_options is not None: + return_client_request_id = pool_stop_resize_options.return_client_request_id + ocp_date = None + if pool_stop_resize_options is not None: + ocp_date = pool_stop_resize_options.ocp_date + if_match = None + if pool_stop_resize_options is not None: + if_match = pool_stop_resize_options.if_match + if_none_match = None + if pool_stop_resize_options is not None: + if_none_match = pool_stop_resize_options.if_none_match + if_modified_since = None + if pool_stop_resize_options is not None: + if_modified_since = pool_stop_resize_options.if_modified_since + if_unmodified_since = None + if pool_stop_resize_options is not None: + if_unmodified_since = pool_stop_resize_options.if_unmodified_since + + # Construct URL + url = self.stop_resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is 
not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'} + + def update_properties( + self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Pool. 
+ + This fully replaces all the updatable properties of the Pool. For + example, if the Pool has a start Task associated with it and if start + Task is not specified with this request, then the Batch service will + remove the existing start Task. + + :param pool_id: The ID of the Pool to update. + :type pool_id: str + :param pool_update_properties_parameter: The parameters for the + request. + :type pool_update_properties_parameter: + ~azure.batch.models.PoolUpdatePropertiesParameter + :param pool_update_properties_options: Additional parameters for the + operation + :type pool_update_properties_options: + ~azure.batch.models.PoolUpdatePropertiesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_update_properties_options is not None: + timeout = pool_update_properties_options.timeout + client_request_id = None + if pool_update_properties_options is not None: + client_request_id = pool_update_properties_options.client_request_id + return_client_request_id = None + if pool_update_properties_options is not None: + return_client_request_id = pool_update_properties_options.return_client_request_id + ocp_date = None + if pool_update_properties_options is not None: + ocp_date = pool_update_properties_options.ocp_date + + # Construct URL + url = self.update_properties.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} + + def remove_nodes( + self, pool_id, node_remove_parameter, 
pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): + """Removes Compute Nodes from the specified Pool. + + This operation can only run when the allocation state of the Pool is + steady. When this operation runs, the allocation state changes from + steady to resizing. + + :param pool_id: The ID of the Pool from which you want to remove + Compute Nodes. + :type pool_id: str + :param node_remove_parameter: The parameters for the request. + :type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter + :param pool_remove_nodes_options: Additional parameters for the + operation + :type pool_remove_nodes_options: + ~azure.batch.models.PoolRemoveNodesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_remove_nodes_options is not None: + timeout = pool_remove_nodes_options.timeout + client_request_id = None + if pool_remove_nodes_options is not None: + client_request_id = pool_remove_nodes_options.client_request_id + return_client_request_id = None + if pool_remove_nodes_options is not None: + return_client_request_id = pool_remove_nodes_options.return_client_request_id + ocp_date = None + if pool_remove_nodes_options is not None: + ocp_date = pool_remove_nodes_options.ocp_date + if_match = None + if pool_remove_nodes_options is not None: + if_match = pool_remove_nodes_options.if_match + if_none_match = None + if pool_remove_nodes_options is not None: + if_none_match = pool_remove_nodes_options.if_none_match + if_modified_since = None + if pool_remove_nodes_options is not None: + if_modified_since = pool_remove_nodes_options.if_modified_since + if_unmodified_since = None + if 
pool_remove_nodes_options is not None: + if_unmodified_since = pool_remove_nodes_options.if_unmodified_since + + # Construct URL + url = self.remove_nodes.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'} diff --git a/azext/generated/sdk/batch/v2019_06_01/operations/task_operations.py b/azext/generated/sdk/batch/v2019_06_01/operations/task_operations.py new file mode 100644 index 00000000..285363b7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/operations/task_operations.py @@ -0,0 +1,1027 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class TaskOperations(object): + """TaskOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. 
+ :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-06-01.9.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-06-01.9.0" + + self.config = config + + def add( + self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Task to the specified Job. + + The maximum lifetime of a Task from addition to completion is 180 days. + If a Task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the Job to which the Task is to be added. + :type job_id: str + :param task: The Task to be added. + :type task: ~azure.batch.models.TaskAddParameter + :param task_add_options: Additional parameters for the operation + :type task_add_options: ~azure.batch.models.TaskAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_options is not None: + timeout = task_add_options.timeout + client_request_id = None + if task_add_options is not None: + client_request_id = task_add_options.client_request_id + return_client_request_id = None + if task_add_options is not None: + return_client_request_id = task_add_options.return_client_request_id + ocp_date = None + if task_add_options is not None: + ocp_date = task_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task, 'TaskAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs/{jobId}/tasks'} + + def list( + self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Tasks that are associated with the specified Job. + + For multi-instance Tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary Task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the Job. + :type job_id: str + :param task_list_options: Additional parameters for the operation + :type task_list_options: ~azure.batch.models.TaskListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudTask + :rtype: + ~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask] + :raises: + :class:`BatchErrorException` + """ + filter = None + if task_list_options is not None: + filter = task_list_options.filter + select = None + if task_list_options is not None: + select = task_list_options.select + expand = None + if task_list_options is not None: + expand = task_list_options.expand + max_results = None + if task_list_options is not None: + max_results = task_list_options.max_results + timeout = None + if task_list_options is not None: + timeout = task_list_options.timeout + client_request_id = None + if task_list_options is not None: + client_request_id = task_list_options.client_request_id + return_client_request_id = None + if task_list_options is not None: + return_client_request_id = task_list_options.return_client_request_id + ocp_date = None + if task_list_options is not None: + ocp_date = task_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout 
is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs/{jobId}/tasks'} + + def add_collection( + self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a collection of Tasks to the specified Job. + + Note that each Task must have a unique ID. 
The Batch service may not + return the results for each Task in the same order the Tasks were + submitted in this request. If the server times out or the connection is + closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the + request. Note that it is up to the user to correctly handle failures + when re-issuing a request. For example, you should use the same Task + IDs during a retry so that if the prior operation succeeded, the retry + will not create extra Tasks unexpectedly. If the response contains any + Tasks which failed to add, a client can retry the request. In a retry, + it is most efficient to resubmit only Tasks that failed to add, and to + omit Tasks that were successfully added on the first attempt. The + maximum lifetime of a Task from addition to completion is 180 days. If + a Task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the Job to which the Task collection is to be + added. + :type job_id: str + :param value: The collection of Tasks to add. The maximum count of + Tasks is 100. The total serialized size of this collection must be + less than 1MB. If it is greater than 1MB (for example if each Task has + 100's of resource files or environment variables), the request will + fail with code 'RequestBodyTooLarge' and should be retried again with + fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + :param task_add_collection_options: Additional parameters for the + operation + :type task_add_collection_options: + ~azure.batch.models.TaskAddCollectionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskAddCollectionResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskAddCollectionResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_collection_options is not None: + timeout = task_add_collection_options.timeout + client_request_id = None + if task_add_collection_options is not None: + client_request_id = task_add_collection_options.client_request_id + return_client_request_id = None + if task_add_collection_options is not None: + return_client_request_id = task_add_collection_options.return_client_request_id + ocp_date = None + if task_add_collection_options is not None: + ocp_date = task_add_collection_options.ocp_date + task_collection = models.TaskAddCollectionParameter(value=value) + + # Construct URL + url = self.add_collection.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskAddCollectionResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} + + def delete( + self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Task from the specified Job. + + When a Task is deleted, all of the files in its directory on the + Compute Node where it ran are also deleted (regardless of the retention + time). For multi-instance Tasks, the delete Task operation applies + synchronously to the primary task; subtasks and their files are then + deleted asynchronously in the background. + + :param job_id: The ID of the Job from which to delete the Task. + :type job_id: str + :param task_id: The ID of the Task to delete. 
+ :type task_id: str + :param task_delete_options: Additional parameters for the operation + :type task_delete_options: ~azure.batch.models.TaskDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_delete_options is not None: + timeout = task_delete_options.timeout + client_request_id = None + if task_delete_options is not None: + client_request_id = task_delete_options.client_request_id + return_client_request_id = None + if task_delete_options is not None: + return_client_request_id = task_delete_options.return_client_request_id + ocp_date = None + if task_delete_options is not None: + ocp_date = task_delete_options.ocp_date + if_match = None + if task_delete_options is not None: + if_match = task_delete_options.if_match + if_none_match = None + if task_delete_options is not None: + if_none_match = task_delete_options.if_none_match + if_modified_since = None + if task_delete_options is not None: + if_modified_since = task_delete_options.if_modified_since + if_unmodified_since = None + if task_delete_options is not None: + if_unmodified_since = task_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + 
delete.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def get( + self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Task. + + For multi-instance Tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary Task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task to get information about. + :type task_id: str + :param task_get_options: Additional parameters for the operation + :type task_get_options: ~azure.batch.models.TaskGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudTask or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTask or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_get_options is not None: + select = task_get_options.select + expand = None + if task_get_options is not None: + expand = task_get_options.expand + timeout = None + if task_get_options is not None: + timeout = task_get_options.timeout + client_request_id = None + if task_get_options is not None: + client_request_id = task_get_options.client_request_id + return_client_request_id = None + if task_get_options is not None: + return_client_request_id = task_get_options.return_client_request_id + ocp_date = None + if task_get_options is not None: + ocp_date = task_get_options.ocp_date + if_match = None + if task_get_options is not None: + if_match = task_get_options.if_match + if_none_match = None + if task_get_options is not None: + if_none_match = task_get_options.if_none_match + if_modified_since = None + if task_get_options is not None: + 
if_modified_since = task_get_options.if_modified_since + if_unmodified_since = None + if task_get_options is not None: + if_unmodified_since = task_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTask', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def update( + self, job_id, task_id, constraints=None, task_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Task. + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to update. + :type task_id: str + :param constraints: Constraints that apply to this Task. If omitted, + the Task is given the default constraints. For multi-instance Tasks, + updating the retention time applies only to the primary Task and not + subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param task_update_options: Additional parameters for the operation + :type task_update_options: ~azure.batch.models.TaskUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_update_options is not None: + timeout = task_update_options.timeout + client_request_id = None + if task_update_options is not None: + client_request_id = task_update_options.client_request_id + return_client_request_id = None + if task_update_options is not None: + return_client_request_id = task_update_options.return_client_request_id + ocp_date = None + if task_update_options is not None: + ocp_date = task_update_options.ocp_date + if_match = None + if task_update_options is not None: + if_match = task_update_options.if_match + if_none_match = None + if task_update_options is not None: + if_none_match = task_update_options.if_none_match + if_modified_since = None + if task_update_options is not None: + if_modified_since = task_update_options.if_modified_since + if_unmodified_since = None + if task_update_options is not None: + if_unmodified_since = task_update_options.if_unmodified_since + task_update_parameter = models.TaskUpdateParameter(constraints=constraints) + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters 
= {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def list_subtasks( + self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the subtasks that are associated with the specified + multi-instance Task. + + If the Task is not a multi-instance Task then this returns an empty + collection. + + :param job_id: The ID of the Job. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param task_list_subtasks_options: Additional parameters for the + operation + :type task_list_subtasks_options: + ~azure.batch.models.TaskListSubtasksOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTaskListSubtasksResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_list_subtasks_options is not None: + select = task_list_subtasks_options.select + timeout = None + if task_list_subtasks_options is not None: + timeout = task_list_subtasks_options.timeout + client_request_id = None + if task_list_subtasks_options is not None: + client_request_id = task_list_subtasks_options.client_request_id + return_client_request_id = None + if task_list_subtasks_options is not None: + return_client_request_id = task_list_subtasks_options.return_client_request_id + ocp_date = None + if task_list_subtasks_options is not None: + ocp_date = task_list_subtasks_options.ocp_date + + # Construct URL + url = self.list_subtasks.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTaskListSubtasksResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + list_subtasks.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'} + + def terminate( + self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified Task. + + When the Task has been terminated, it moves to the completed state. For + multi-instance Tasks, the terminate Task operation applies + synchronously to the primary task; subtasks are then terminated + asynchronously in the background. + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to terminate. 
+ :type task_id: str + :param task_terminate_options: Additional parameters for the operation + :type task_terminate_options: ~azure.batch.models.TaskTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_terminate_options is not None: + timeout = task_terminate_options.timeout + client_request_id = None + if task_terminate_options is not None: + client_request_id = task_terminate_options.client_request_id + return_client_request_id = None + if task_terminate_options is not None: + return_client_request_id = task_terminate_options.return_client_request_id + ocp_date = None + if task_terminate_options is not None: + ocp_date = task_terminate_options.ocp_date + if_match = None + if task_terminate_options is not None: + if_match = task_terminate_options.if_match + if_none_match = None + if task_terminate_options is not None: + if_none_match = task_terminate_options.if_none_match + if_modified_since = None + if task_terminate_options is not None: + if_modified_since = task_terminate_options.if_modified_since + if_unmodified_since = None + if task_terminate_options is not None: + if_unmodified_since = task_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 
'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/terminate'} + + def reactivate( + self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config): + """Reactivates a Task, allowing it to run again even if its retry count + has been exhausted. + + Reactivation makes a Task eligible to be retried again up to its + maximum retry count. The Task's state is changed to active. As the Task + is no longer in the completed state, any previous exit code or failure + information is no longer available after reactivation. Each time a Task + is reactivated, its retry count is reset to 0. Reactivation will fail + for Tasks that are not completed or that previously completed + successfully (with an exit code of 0). Additionally, it will fail if + the Job has completed (or is terminating or deleting). + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to reactivate. + :type task_id: str + :param task_reactivate_options: Additional parameters for the + operation + :type task_reactivate_options: + ~azure.batch.models.TaskReactivateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_reactivate_options is not None: + timeout = task_reactivate_options.timeout + client_request_id = None + if task_reactivate_options is not None: + client_request_id = task_reactivate_options.client_request_id + return_client_request_id = None + if task_reactivate_options is not None: + return_client_request_id = task_reactivate_options.return_client_request_id + ocp_date = None + if task_reactivate_options is not None: + ocp_date = task_reactivate_options.ocp_date + if_match = None + if task_reactivate_options is not None: + if_match = task_reactivate_options.if_match + if_none_match = None + if task_reactivate_options is not None: + if_none_match = task_reactivate_options.if_none_match + if_modified_since = None + if task_reactivate_options is not None: + if_modified_since = task_reactivate_options.if_modified_since + if_unmodified_since = None + if task_reactivate_options is not None: + if_unmodified_since = task_reactivate_options.if_unmodified_since + + # Construct URL + url = self.reactivate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reactivate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/reactivate'} diff --git a/azext/generated/sdk/batch/v2019_06_01/version.py b/azext/generated/sdk/batch/v2019_06_01/version.py new file mode 100644 index 
00000000..d4fa93c3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_06_01/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +VERSION = "2019-06-01.9.0" + diff --git a/azext/generated/sdk/batch/v2019_08_01/__init__.py b/azext/generated/sdk/batch/v2019_08_01/__init__.py new file mode 100644 index 00000000..f27e0cb6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/__init__.py @@ -0,0 +1,18 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .batch_service_client import BatchServiceClient +from .version import VERSION + +__all__ = ['BatchServiceClient'] + +__version__ = VERSION + diff --git a/azext/generated/sdk/batch/v2019_08_01/batch_service_client.py b/azext/generated/sdk/batch/v2019_08_01/batch_service_client.py new file mode 100644 index 00000000..cfd8ae90 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/batch_service_client.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.service_client import SDKClient +from msrest import Serializer, Deserializer +from msrestazure import AzureConfiguration +from .version import VERSION +from .operations.application_operations import ApplicationOperations +from .operations.pool_operations import PoolOperations +from .operations.account_operations import AccountOperations +from .operations.job_operations import JobOperations +from .operations.certificate_operations import CertificateOperations +from .operations.file_operations import FileOperations +from .operations.job_schedule_operations import JobScheduleOperations +from .operations.task_operations import TaskOperations +from .operations.compute_node_operations import ComputeNodeOperations +from . import models + + +class BatchServiceClientConfiguration(AzureConfiguration): + """Configuration for BatchServiceClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' + + super(BatchServiceClientConfiguration, self).__init__(base_url) + + self.add_user_agent('azure-batch/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.batch_url = batch_url + + +class BatchServiceClient(SDKClient): + """A client for issuing REST requests to the Azure Batch service. + + :ivar config: Configuration for client. + :vartype config: BatchServiceClientConfiguration + + :ivar application: Application operations + :vartype application: azure.batch.operations.ApplicationOperations + :ivar pool: Pool operations + :vartype pool: azure.batch.operations.PoolOperations + :ivar account: Account operations + :vartype account: azure.batch.operations.AccountOperations + :ivar job: Job operations + :vartype job: azure.batch.operations.JobOperations + :ivar certificate: Certificate operations + :vartype certificate: azure.batch.operations.CertificateOperations + :ivar file: File operations + :vartype file: azure.batch.operations.FileOperations + :ivar job_schedule: JobSchedule operations + :vartype job_schedule: azure.batch.operations.JobScheduleOperations + :ivar task: Task operations + :vartype task: azure.batch.operations.TaskOperations + :ivar compute_node: ComputeNode operations + :vartype compute_node: azure.batch.operations.ComputeNodeOperations + + :param credentials: Credentials needed for the client to connect to Azure. + :type credentials: :mod:`A msrestazure Credentials + object` + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str + """ + + def __init__( + self, credentials, batch_url): + + self.config = BatchServiceClientConfiguration(credentials, batch_url) + super(BatchServiceClient, self).__init__(self.config.credentials, self.config) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self.api_version = '2019-08-01.10.0' + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.application = ApplicationOperations( + self._client, self.config, self._serialize, self._deserialize) + self.pool = PoolOperations( + self._client, self.config, self._serialize, self._deserialize) + self.account = AccountOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job = JobOperations( + self._client, self.config, self._serialize, self._deserialize) + self.certificate = CertificateOperations( + self._client, self.config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self.config, self._serialize, self._deserialize) + self.job_schedule = JobScheduleOperations( + self._client, self.config, self._serialize, self._deserialize) + self.task = TaskOperations( + self._client, self.config, self._serialize, self._deserialize) + self.compute_node = ComputeNodeOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/__init__.py b/azext/generated/sdk/batch/v2019_08_01/models/__init__.py new file mode 100644 index 00000000..279ba7b0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/__init__.py @@ -0,0 +1,741 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from .pool_usage_metrics_py3 import PoolUsageMetrics + from .image_reference_py3 import ImageReference + from .image_information_py3 import ImageInformation + from .authentication_token_settings_py3 import AuthenticationTokenSettings + from .usage_statistics_py3 import UsageStatistics + from .resource_statistics_py3 import ResourceStatistics + from .pool_statistics_py3 import PoolStatistics + from .job_statistics_py3 import JobStatistics + from .name_value_pair_py3 import NameValuePair + from .delete_certificate_error_py3 import DeleteCertificateError + from .certificate_py3 import Certificate + from .application_package_reference_py3 import ApplicationPackageReference + from .application_summary_py3 import ApplicationSummary + from .certificate_add_parameter_py3 import CertificateAddParameter + from .file_properties_py3 import FileProperties + from .node_file_py3 import NodeFile + from .schedule_py3 import Schedule + from .job_constraints_py3 import JobConstraints + from .job_network_configuration_py3 import JobNetworkConfiguration + from .container_registry_py3 import ContainerRegistry + from .task_container_settings_py3 import TaskContainerSettings + from .resource_file_py3 import ResourceFile + from .environment_setting_py3 import EnvironmentSetting + from .exit_options_py3 import ExitOptions + from .exit_code_mapping_py3 import ExitCodeMapping + from .exit_code_range_mapping_py3 import ExitCodeRangeMapping + from .exit_conditions_py3 import ExitConditions + from .auto_user_specification_py3 import AutoUserSpecification + from .user_identity_py3 import UserIdentity + from .linux_user_configuration_py3 import LinuxUserConfiguration + from .windows_user_configuration_py3 import WindowsUserConfiguration + from .user_account_py3 import UserAccount + from .task_constraints_py3 import 
TaskConstraints + from .output_file_blob_container_destination_py3 import OutputFileBlobContainerDestination + from .output_file_destination_py3 import OutputFileDestination + from .output_file_upload_options_py3 import OutputFileUploadOptions + from .output_file_py3 import OutputFile + from .job_manager_task_py3 import JobManagerTask + from .job_preparation_task_py3 import JobPreparationTask + from .job_release_task_py3 import JobReleaseTask + from .task_scheduling_policy_py3 import TaskSchedulingPolicy + from .start_task_py3 import StartTask + from .certificate_reference_py3 import CertificateReference + from .metadata_item_py3 import MetadataItem + from .cloud_service_configuration_py3 import CloudServiceConfiguration + from .windows_configuration_py3 import WindowsConfiguration + from .data_disk_py3 import DataDisk + from .container_configuration_py3 import ContainerConfiguration + from .virtual_machine_configuration_py3 import VirtualMachineConfiguration + from .network_security_group_rule_py3 import NetworkSecurityGroupRule + from .inbound_nat_pool_py3 import InboundNATPool + from .pool_endpoint_configuration_py3 import PoolEndpointConfiguration + from .network_configuration_py3 import NetworkConfiguration + from .azure_blob_file_system_configuration_py3 import AzureBlobFileSystemConfiguration + from .nfs_mount_configuration_py3 import NFSMountConfiguration + from .cifs_mount_configuration_py3 import CIFSMountConfiguration + from .azure_file_share_configuration_py3 import AzureFileShareConfiguration + from .mount_configuration_py3 import MountConfiguration + from .pool_specification_py3 import PoolSpecification + from .auto_pool_specification_py3 import AutoPoolSpecification + from .pool_information_py3 import PoolInformation + from .job_specification_py3 import JobSpecification + from .recent_job_py3 import RecentJob + from .job_schedule_execution_information_py3 import JobScheduleExecutionInformation + from .job_schedule_statistics_py3 import 
JobScheduleStatistics + from .cloud_job_schedule_py3 import CloudJobSchedule + from .job_schedule_add_parameter_py3 import JobScheduleAddParameter + from .job_scheduling_error_py3 import JobSchedulingError + from .job_execution_information_py3 import JobExecutionInformation + from .cloud_job_py3 import CloudJob + from .job_add_parameter_py3 import JobAddParameter + from .task_container_execution_information_py3 import TaskContainerExecutionInformation + from .task_failure_information_py3 import TaskFailureInformation + from .job_preparation_task_execution_information_py3 import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information_py3 import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information_py3 import JobPreparationAndReleaseTaskExecutionInformation + from .task_counts_py3 import TaskCounts + from .auto_scale_run_error_py3 import AutoScaleRunError + from .auto_scale_run_py3 import AutoScaleRun + from .resize_error_py3 import ResizeError + from .cloud_pool_py3 import CloudPool + from .pool_add_parameter_py3 import PoolAddParameter + from .affinity_information_py3 import AffinityInformation + from .task_execution_information_py3 import TaskExecutionInformation + from .compute_node_information_py3 import ComputeNodeInformation + from .node_agent_information_py3 import NodeAgentInformation + from .multi_instance_settings_py3 import MultiInstanceSettings + from .task_statistics_py3 import TaskStatistics + from .task_id_range_py3 import TaskIdRange + from .task_dependencies_py3 import TaskDependencies + from .cloud_task_py3 import CloudTask + from .task_add_parameter_py3 import TaskAddParameter + from .task_add_collection_parameter_py3 import TaskAddCollectionParameter + from .error_message_py3 import ErrorMessage + from .batch_error_detail_py3 import BatchErrorDetail + from .batch_error_py3 import BatchError, BatchErrorException + from .task_add_result_py3 import TaskAddResult + from 
.task_add_collection_result_py3 import TaskAddCollectionResult + from .subtask_information_py3 import SubtaskInformation + from .cloud_task_list_subtasks_result_py3 import CloudTaskListSubtasksResult + from .task_information_py3 import TaskInformation + from .start_task_information_py3 import StartTaskInformation + from .compute_node_error_py3 import ComputeNodeError + from .inbound_endpoint_py3 import InboundEndpoint + from .compute_node_endpoint_configuration_py3 import ComputeNodeEndpointConfiguration + from .compute_node_py3 import ComputeNode + from .compute_node_user_py3 import ComputeNodeUser + from .compute_node_get_remote_login_settings_result_py3 import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter_py3 import JobSchedulePatchParameter + from .job_schedule_update_parameter_py3 import JobScheduleUpdateParameter + from .job_disable_parameter_py3 import JobDisableParameter + from .job_terminate_parameter_py3 import JobTerminateParameter + from .job_patch_parameter_py3 import JobPatchParameter + from .job_update_parameter_py3 import JobUpdateParameter + from .pool_enable_auto_scale_parameter_py3 import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter_py3 import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter_py3 import PoolResizeParameter + from .pool_update_properties_parameter_py3 import PoolUpdatePropertiesParameter + from .pool_patch_parameter_py3 import PoolPatchParameter + from .task_update_parameter_py3 import TaskUpdateParameter + from .node_update_user_parameter_py3 import NodeUpdateUserParameter + from .node_reboot_parameter_py3 import NodeRebootParameter + from .node_reimage_parameter_py3 import NodeReimageParameter + from .node_disable_scheduling_parameter_py3 import NodeDisableSchedulingParameter + from .node_remove_parameter_py3 import NodeRemoveParameter + from .upload_batch_service_logs_configuration_py3 import UploadBatchServiceLogsConfiguration + from 
.upload_batch_service_logs_result_py3 import UploadBatchServiceLogsResult + from .node_counts_py3 import NodeCounts + from .pool_node_counts_py3 import PoolNodeCounts + from .application_list_options_py3 import ApplicationListOptions + from .application_get_options_py3 import ApplicationGetOptions + from .pool_list_usage_metrics_options_py3 import PoolListUsageMetricsOptions + from .pool_get_all_lifetime_statistics_options_py3 import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options_py3 import PoolAddOptions + from .pool_list_options_py3 import PoolListOptions + from .pool_delete_options_py3 import PoolDeleteOptions + from .pool_exists_options_py3 import PoolExistsOptions + from .pool_get_options_py3 import PoolGetOptions + from .pool_patch_options_py3 import PoolPatchOptions + from .pool_disable_auto_scale_options_py3 import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options_py3 import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options_py3 import PoolEvaluateAutoScaleOptions + from .pool_resize_options_py3 import PoolResizeOptions + from .pool_stop_resize_options_py3 import PoolStopResizeOptions + from .pool_update_properties_options_py3 import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options_py3 import PoolRemoveNodesOptions + from .account_list_supported_images_options_py3 import AccountListSupportedImagesOptions + from .account_list_pool_node_counts_options_py3 import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options_py3 import JobGetAllLifetimeStatisticsOptions + from .job_delete_options_py3 import JobDeleteOptions + from .job_get_options_py3 import JobGetOptions + from .job_patch_options_py3 import JobPatchOptions + from .job_update_options_py3 import JobUpdateOptions + from .job_disable_options_py3 import JobDisableOptions + from .job_enable_options_py3 import JobEnableOptions + from .job_terminate_options_py3 import JobTerminateOptions + from .job_add_options_py3 import 
JobAddOptions + from .job_list_options_py3 import JobListOptions + from .job_list_from_job_schedule_options_py3 import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options_py3 import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options_py3 import JobGetTaskCountsOptions + from .certificate_add_options_py3 import CertificateAddOptions + from .certificate_list_options_py3 import CertificateListOptions + from .certificate_cancel_deletion_options_py3 import CertificateCancelDeletionOptions + from .certificate_delete_options_py3 import CertificateDeleteOptions + from .certificate_get_options_py3 import CertificateGetOptions + from .file_delete_from_task_options_py3 import FileDeleteFromTaskOptions + from .file_get_from_task_options_py3 import FileGetFromTaskOptions + from .file_get_properties_from_task_options_py3 import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options_py3 import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options_py3 import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options_py3 import FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options_py3 import FileListFromTaskOptions + from .file_list_from_compute_node_options_py3 import FileListFromComputeNodeOptions + from .job_schedule_exists_options_py3 import JobScheduleExistsOptions + from .job_schedule_delete_options_py3 import JobScheduleDeleteOptions + from .job_schedule_get_options_py3 import JobScheduleGetOptions + from .job_schedule_patch_options_py3 import JobSchedulePatchOptions + from .job_schedule_update_options_py3 import JobScheduleUpdateOptions + from .job_schedule_disable_options_py3 import JobScheduleDisableOptions + from .job_schedule_enable_options_py3 import JobScheduleEnableOptions + from .job_schedule_terminate_options_py3 import JobScheduleTerminateOptions + from .job_schedule_add_options_py3 import 
JobScheduleAddOptions + from .job_schedule_list_options_py3 import JobScheduleListOptions + from .task_add_options_py3 import TaskAddOptions + from .task_list_options_py3 import TaskListOptions + from .task_add_collection_options_py3 import TaskAddCollectionOptions + from .task_delete_options_py3 import TaskDeleteOptions + from .task_get_options_py3 import TaskGetOptions + from .task_update_options_py3 import TaskUpdateOptions + from .task_list_subtasks_options_py3 import TaskListSubtasksOptions + from .task_terminate_options_py3 import TaskTerminateOptions + from .task_reactivate_options_py3 import TaskReactivateOptions + from .compute_node_add_user_options_py3 import ComputeNodeAddUserOptions + from .compute_node_delete_user_options_py3 import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options_py3 import ComputeNodeUpdateUserOptions + from .compute_node_get_options_py3 import ComputeNodeGetOptions + from .compute_node_reboot_options_py3 import ComputeNodeRebootOptions + from .compute_node_reimage_options_py3 import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options_py3 import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options_py3 import ComputeNodeEnableSchedulingOptions + from .compute_node_get_remote_login_settings_options_py3 import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options_py3 import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options_py3 import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options_py3 import ComputeNodeListOptions +except (SyntaxError, ImportError): + from .pool_usage_metrics import PoolUsageMetrics + from .image_reference import ImageReference + from .image_information import ImageInformation + from .authentication_token_settings import AuthenticationTokenSettings + from .usage_statistics import UsageStatistics + from .resource_statistics import ResourceStatistics 
+ from .pool_statistics import PoolStatistics + from .job_statistics import JobStatistics + from .name_value_pair import NameValuePair + from .delete_certificate_error import DeleteCertificateError + from .certificate import Certificate + from .application_package_reference import ApplicationPackageReference + from .application_summary import ApplicationSummary + from .certificate_add_parameter import CertificateAddParameter + from .file_properties import FileProperties + from .node_file import NodeFile + from .schedule import Schedule + from .job_constraints import JobConstraints + from .job_network_configuration import JobNetworkConfiguration + from .container_registry import ContainerRegistry + from .task_container_settings import TaskContainerSettings + from .resource_file import ResourceFile + from .environment_setting import EnvironmentSetting + from .exit_options import ExitOptions + from .exit_code_mapping import ExitCodeMapping + from .exit_code_range_mapping import ExitCodeRangeMapping + from .exit_conditions import ExitConditions + from .auto_user_specification import AutoUserSpecification + from .user_identity import UserIdentity + from .linux_user_configuration import LinuxUserConfiguration + from .windows_user_configuration import WindowsUserConfiguration + from .user_account import UserAccount + from .task_constraints import TaskConstraints + from .output_file_blob_container_destination import OutputFileBlobContainerDestination + from .output_file_destination import OutputFileDestination + from .output_file_upload_options import OutputFileUploadOptions + from .output_file import OutputFile + from .job_manager_task import JobManagerTask + from .job_preparation_task import JobPreparationTask + from .job_release_task import JobReleaseTask + from .task_scheduling_policy import TaskSchedulingPolicy + from .start_task import StartTask + from .certificate_reference import CertificateReference + from .metadata_item import MetadataItem + from 
.cloud_service_configuration import CloudServiceConfiguration + from .windows_configuration import WindowsConfiguration + from .data_disk import DataDisk + from .container_configuration import ContainerConfiguration + from .virtual_machine_configuration import VirtualMachineConfiguration + from .network_security_group_rule import NetworkSecurityGroupRule + from .inbound_nat_pool import InboundNATPool + from .pool_endpoint_configuration import PoolEndpointConfiguration + from .network_configuration import NetworkConfiguration + from .azure_blob_file_system_configuration import AzureBlobFileSystemConfiguration + from .nfs_mount_configuration import NFSMountConfiguration + from .cifs_mount_configuration import CIFSMountConfiguration + from .azure_file_share_configuration import AzureFileShareConfiguration + from .mount_configuration import MountConfiguration + from .pool_specification import PoolSpecification + from .auto_pool_specification import AutoPoolSpecification + from .pool_information import PoolInformation + from .job_specification import JobSpecification + from .recent_job import RecentJob + from .job_schedule_execution_information import JobScheduleExecutionInformation + from .job_schedule_statistics import JobScheduleStatistics + from .cloud_job_schedule import CloudJobSchedule + from .job_schedule_add_parameter import JobScheduleAddParameter + from .job_scheduling_error import JobSchedulingError + from .job_execution_information import JobExecutionInformation + from .cloud_job import CloudJob + from .job_add_parameter import JobAddParameter + from .task_container_execution_information import TaskContainerExecutionInformation + from .task_failure_information import TaskFailureInformation + from .job_preparation_task_execution_information import JobPreparationTaskExecutionInformation + from .job_release_task_execution_information import JobReleaseTaskExecutionInformation + from .job_preparation_and_release_task_execution_information import 
JobPreparationAndReleaseTaskExecutionInformation + from .task_counts import TaskCounts + from .auto_scale_run_error import AutoScaleRunError + from .auto_scale_run import AutoScaleRun + from .resize_error import ResizeError + from .cloud_pool import CloudPool + from .pool_add_parameter import PoolAddParameter + from .affinity_information import AffinityInformation + from .task_execution_information import TaskExecutionInformation + from .compute_node_information import ComputeNodeInformation + from .node_agent_information import NodeAgentInformation + from .multi_instance_settings import MultiInstanceSettings + from .task_statistics import TaskStatistics + from .task_id_range import TaskIdRange + from .task_dependencies import TaskDependencies + from .cloud_task import CloudTask + from .task_add_parameter import TaskAddParameter + from .task_add_collection_parameter import TaskAddCollectionParameter + from .error_message import ErrorMessage + from .batch_error_detail import BatchErrorDetail + from .batch_error import BatchError, BatchErrorException + from .task_add_result import TaskAddResult + from .task_add_collection_result import TaskAddCollectionResult + from .subtask_information import SubtaskInformation + from .cloud_task_list_subtasks_result import CloudTaskListSubtasksResult + from .task_information import TaskInformation + from .start_task_information import StartTaskInformation + from .compute_node_error import ComputeNodeError + from .inbound_endpoint import InboundEndpoint + from .compute_node_endpoint_configuration import ComputeNodeEndpointConfiguration + from .compute_node import ComputeNode + from .compute_node_user import ComputeNodeUser + from .compute_node_get_remote_login_settings_result import ComputeNodeGetRemoteLoginSettingsResult + from .job_schedule_patch_parameter import JobSchedulePatchParameter + from .job_schedule_update_parameter import JobScheduleUpdateParameter + from .job_disable_parameter import JobDisableParameter + from 
.job_terminate_parameter import JobTerminateParameter + from .job_patch_parameter import JobPatchParameter + from .job_update_parameter import JobUpdateParameter + from .pool_enable_auto_scale_parameter import PoolEnableAutoScaleParameter + from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter + from .pool_resize_parameter import PoolResizeParameter + from .pool_update_properties_parameter import PoolUpdatePropertiesParameter + from .pool_patch_parameter import PoolPatchParameter + from .task_update_parameter import TaskUpdateParameter + from .node_update_user_parameter import NodeUpdateUserParameter + from .node_reboot_parameter import NodeRebootParameter + from .node_reimage_parameter import NodeReimageParameter + from .node_disable_scheduling_parameter import NodeDisableSchedulingParameter + from .node_remove_parameter import NodeRemoveParameter + from .upload_batch_service_logs_configuration import UploadBatchServiceLogsConfiguration + from .upload_batch_service_logs_result import UploadBatchServiceLogsResult + from .node_counts import NodeCounts + from .pool_node_counts import PoolNodeCounts + from .application_list_options import ApplicationListOptions + from .application_get_options import ApplicationGetOptions + from .pool_list_usage_metrics_options import PoolListUsageMetricsOptions + from .pool_get_all_lifetime_statistics_options import PoolGetAllLifetimeStatisticsOptions + from .pool_add_options import PoolAddOptions + from .pool_list_options import PoolListOptions + from .pool_delete_options import PoolDeleteOptions + from .pool_exists_options import PoolExistsOptions + from .pool_get_options import PoolGetOptions + from .pool_patch_options import PoolPatchOptions + from .pool_disable_auto_scale_options import PoolDisableAutoScaleOptions + from .pool_enable_auto_scale_options import PoolEnableAutoScaleOptions + from .pool_evaluate_auto_scale_options import PoolEvaluateAutoScaleOptions + from .pool_resize_options import 
PoolResizeOptions + from .pool_stop_resize_options import PoolStopResizeOptions + from .pool_update_properties_options import PoolUpdatePropertiesOptions + from .pool_remove_nodes_options import PoolRemoveNodesOptions + from .account_list_supported_images_options import AccountListSupportedImagesOptions + from .account_list_pool_node_counts_options import AccountListPoolNodeCountsOptions + from .job_get_all_lifetime_statistics_options import JobGetAllLifetimeStatisticsOptions + from .job_delete_options import JobDeleteOptions + from .job_get_options import JobGetOptions + from .job_patch_options import JobPatchOptions + from .job_update_options import JobUpdateOptions + from .job_disable_options import JobDisableOptions + from .job_enable_options import JobEnableOptions + from .job_terminate_options import JobTerminateOptions + from .job_add_options import JobAddOptions + from .job_list_options import JobListOptions + from .job_list_from_job_schedule_options import JobListFromJobScheduleOptions + from .job_list_preparation_and_release_task_status_options import JobListPreparationAndReleaseTaskStatusOptions + from .job_get_task_counts_options import JobGetTaskCountsOptions + from .certificate_add_options import CertificateAddOptions + from .certificate_list_options import CertificateListOptions + from .certificate_cancel_deletion_options import CertificateCancelDeletionOptions + from .certificate_delete_options import CertificateDeleteOptions + from .certificate_get_options import CertificateGetOptions + from .file_delete_from_task_options import FileDeleteFromTaskOptions + from .file_get_from_task_options import FileGetFromTaskOptions + from .file_get_properties_from_task_options import FileGetPropertiesFromTaskOptions + from .file_delete_from_compute_node_options import FileDeleteFromComputeNodeOptions + from .file_get_from_compute_node_options import FileGetFromComputeNodeOptions + from .file_get_properties_from_compute_node_options import 
FileGetPropertiesFromComputeNodeOptions + from .file_list_from_task_options import FileListFromTaskOptions + from .file_list_from_compute_node_options import FileListFromComputeNodeOptions + from .job_schedule_exists_options import JobScheduleExistsOptions + from .job_schedule_delete_options import JobScheduleDeleteOptions + from .job_schedule_get_options import JobScheduleGetOptions + from .job_schedule_patch_options import JobSchedulePatchOptions + from .job_schedule_update_options import JobScheduleUpdateOptions + from .job_schedule_disable_options import JobScheduleDisableOptions + from .job_schedule_enable_options import JobScheduleEnableOptions + from .job_schedule_terminate_options import JobScheduleTerminateOptions + from .job_schedule_add_options import JobScheduleAddOptions + from .job_schedule_list_options import JobScheduleListOptions + from .task_add_options import TaskAddOptions + from .task_list_options import TaskListOptions + from .task_add_collection_options import TaskAddCollectionOptions + from .task_delete_options import TaskDeleteOptions + from .task_get_options import TaskGetOptions + from .task_update_options import TaskUpdateOptions + from .task_list_subtasks_options import TaskListSubtasksOptions + from .task_terminate_options import TaskTerminateOptions + from .task_reactivate_options import TaskReactivateOptions + from .compute_node_add_user_options import ComputeNodeAddUserOptions + from .compute_node_delete_user_options import ComputeNodeDeleteUserOptions + from .compute_node_update_user_options import ComputeNodeUpdateUserOptions + from .compute_node_get_options import ComputeNodeGetOptions + from .compute_node_reboot_options import ComputeNodeRebootOptions + from .compute_node_reimage_options import ComputeNodeReimageOptions + from .compute_node_disable_scheduling_options import ComputeNodeDisableSchedulingOptions + from .compute_node_enable_scheduling_options import ComputeNodeEnableSchedulingOptions + from 
.compute_node_get_remote_login_settings_options import ComputeNodeGetRemoteLoginSettingsOptions + from .compute_node_get_remote_desktop_options import ComputeNodeGetRemoteDesktopOptions + from .compute_node_upload_batch_service_logs_options import ComputeNodeUploadBatchServiceLogsOptions + from .compute_node_list_options import ComputeNodeListOptions +from .application_summary_paged import ApplicationSummaryPaged +from .pool_usage_metrics_paged import PoolUsageMetricsPaged +from .cloud_pool_paged import CloudPoolPaged +from .image_information_paged import ImageInformationPaged +from .pool_node_counts_paged import PoolNodeCountsPaged +from .cloud_job_paged import CloudJobPaged +from .job_preparation_and_release_task_execution_information_paged import JobPreparationAndReleaseTaskExecutionInformationPaged +from .certificate_paged import CertificatePaged +from .node_file_paged import NodeFilePaged +from .cloud_job_schedule_paged import CloudJobSchedulePaged +from .cloud_task_paged import CloudTaskPaged +from .compute_node_paged import ComputeNodePaged +from .batch_service_client_enums import ( + OSType, + VerificationType, + AccessScope, + CertificateState, + CertificateFormat, + ContainerWorkingDirectory, + JobAction, + DependencyAction, + AutoUserScope, + ElevationLevel, + LoginMode, + OutputFileUploadCondition, + ComputeNodeFillType, + CertificateStoreLocation, + CertificateVisibility, + CachingType, + StorageAccountType, + DynamicVNetAssignmentScope, + InboundEndpointProtocol, + NetworkSecurityGroupRuleAccess, + PoolLifetimeOption, + OnAllTasksComplete, + OnTaskFailure, + JobScheduleState, + ErrorCategory, + JobState, + JobPreparationTaskState, + TaskExecutionResult, + JobReleaseTaskState, + PoolState, + AllocationState, + TaskState, + TaskAddStatus, + SubtaskState, + StartTaskState, + ComputeNodeState, + SchedulingState, + DisableJobOption, + ComputeNodeDeallocationOption, + ComputeNodeRebootOption, + ComputeNodeReimageOption, + DisableComputeNodeSchedulingOption, 
+) + +__all__ = [ + 'PoolUsageMetrics', + 'ImageReference', + 'ImageInformation', + 'AuthenticationTokenSettings', + 'UsageStatistics', + 'ResourceStatistics', + 'PoolStatistics', + 'JobStatistics', + 'NameValuePair', + 'DeleteCertificateError', + 'Certificate', + 'ApplicationPackageReference', + 'ApplicationSummary', + 'CertificateAddParameter', + 'FileProperties', + 'NodeFile', + 'Schedule', + 'JobConstraints', + 'JobNetworkConfiguration', + 'ContainerRegistry', + 'TaskContainerSettings', + 'ResourceFile', + 'EnvironmentSetting', + 'ExitOptions', + 'ExitCodeMapping', + 'ExitCodeRangeMapping', + 'ExitConditions', + 'AutoUserSpecification', + 'UserIdentity', + 'LinuxUserConfiguration', + 'WindowsUserConfiguration', + 'UserAccount', + 'TaskConstraints', + 'OutputFileBlobContainerDestination', + 'OutputFileDestination', + 'OutputFileUploadOptions', + 'OutputFile', + 'JobManagerTask', + 'JobPreparationTask', + 'JobReleaseTask', + 'TaskSchedulingPolicy', + 'StartTask', + 'CertificateReference', + 'MetadataItem', + 'CloudServiceConfiguration', + 'WindowsConfiguration', + 'DataDisk', + 'ContainerConfiguration', + 'VirtualMachineConfiguration', + 'NetworkSecurityGroupRule', + 'InboundNATPool', + 'PoolEndpointConfiguration', + 'NetworkConfiguration', + 'AzureBlobFileSystemConfiguration', + 'NFSMountConfiguration', + 'CIFSMountConfiguration', + 'AzureFileShareConfiguration', + 'MountConfiguration', + 'PoolSpecification', + 'AutoPoolSpecification', + 'PoolInformation', + 'JobSpecification', + 'RecentJob', + 'JobScheduleExecutionInformation', + 'JobScheduleStatistics', + 'CloudJobSchedule', + 'JobScheduleAddParameter', + 'JobSchedulingError', + 'JobExecutionInformation', + 'CloudJob', + 'JobAddParameter', + 'TaskContainerExecutionInformation', + 'TaskFailureInformation', + 'JobPreparationTaskExecutionInformation', + 'JobReleaseTaskExecutionInformation', + 'JobPreparationAndReleaseTaskExecutionInformation', + 'TaskCounts', + 'AutoScaleRunError', + 'AutoScaleRun', + 
'ResizeError', + 'CloudPool', + 'PoolAddParameter', + 'AffinityInformation', + 'TaskExecutionInformation', + 'ComputeNodeInformation', + 'NodeAgentInformation', + 'MultiInstanceSettings', + 'TaskStatistics', + 'TaskIdRange', + 'TaskDependencies', + 'CloudTask', + 'TaskAddParameter', + 'TaskAddCollectionParameter', + 'ErrorMessage', + 'BatchErrorDetail', + 'BatchError', 'BatchErrorException', + 'TaskAddResult', + 'TaskAddCollectionResult', + 'SubtaskInformation', + 'CloudTaskListSubtasksResult', + 'TaskInformation', + 'StartTaskInformation', + 'ComputeNodeError', + 'InboundEndpoint', + 'ComputeNodeEndpointConfiguration', + 'ComputeNode', + 'ComputeNodeUser', + 'ComputeNodeGetRemoteLoginSettingsResult', + 'JobSchedulePatchParameter', + 'JobScheduleUpdateParameter', + 'JobDisableParameter', + 'JobTerminateParameter', + 'JobPatchParameter', + 'JobUpdateParameter', + 'PoolEnableAutoScaleParameter', + 'PoolEvaluateAutoScaleParameter', + 'PoolResizeParameter', + 'PoolUpdatePropertiesParameter', + 'PoolPatchParameter', + 'TaskUpdateParameter', + 'NodeUpdateUserParameter', + 'NodeRebootParameter', + 'NodeReimageParameter', + 'NodeDisableSchedulingParameter', + 'NodeRemoveParameter', + 'UploadBatchServiceLogsConfiguration', + 'UploadBatchServiceLogsResult', + 'NodeCounts', + 'PoolNodeCounts', + 'ApplicationListOptions', + 'ApplicationGetOptions', + 'PoolListUsageMetricsOptions', + 'PoolGetAllLifetimeStatisticsOptions', + 'PoolAddOptions', + 'PoolListOptions', + 'PoolDeleteOptions', + 'PoolExistsOptions', + 'PoolGetOptions', + 'PoolPatchOptions', + 'PoolDisableAutoScaleOptions', + 'PoolEnableAutoScaleOptions', + 'PoolEvaluateAutoScaleOptions', + 'PoolResizeOptions', + 'PoolStopResizeOptions', + 'PoolUpdatePropertiesOptions', + 'PoolRemoveNodesOptions', + 'AccountListSupportedImagesOptions', + 'AccountListPoolNodeCountsOptions', + 'JobGetAllLifetimeStatisticsOptions', + 'JobDeleteOptions', + 'JobGetOptions', + 'JobPatchOptions', + 'JobUpdateOptions', + 'JobDisableOptions', + 
'JobEnableOptions', + 'JobTerminateOptions', + 'JobAddOptions', + 'JobListOptions', + 'JobListFromJobScheduleOptions', + 'JobListPreparationAndReleaseTaskStatusOptions', + 'JobGetTaskCountsOptions', + 'CertificateAddOptions', + 'CertificateListOptions', + 'CertificateCancelDeletionOptions', + 'CertificateDeleteOptions', + 'CertificateGetOptions', + 'FileDeleteFromTaskOptions', + 'FileGetFromTaskOptions', + 'FileGetPropertiesFromTaskOptions', + 'FileDeleteFromComputeNodeOptions', + 'FileGetFromComputeNodeOptions', + 'FileGetPropertiesFromComputeNodeOptions', + 'FileListFromTaskOptions', + 'FileListFromComputeNodeOptions', + 'JobScheduleExistsOptions', + 'JobScheduleDeleteOptions', + 'JobScheduleGetOptions', + 'JobSchedulePatchOptions', + 'JobScheduleUpdateOptions', + 'JobScheduleDisableOptions', + 'JobScheduleEnableOptions', + 'JobScheduleTerminateOptions', + 'JobScheduleAddOptions', + 'JobScheduleListOptions', + 'TaskAddOptions', + 'TaskListOptions', + 'TaskAddCollectionOptions', + 'TaskDeleteOptions', + 'TaskGetOptions', + 'TaskUpdateOptions', + 'TaskListSubtasksOptions', + 'TaskTerminateOptions', + 'TaskReactivateOptions', + 'ComputeNodeAddUserOptions', + 'ComputeNodeDeleteUserOptions', + 'ComputeNodeUpdateUserOptions', + 'ComputeNodeGetOptions', + 'ComputeNodeRebootOptions', + 'ComputeNodeReimageOptions', + 'ComputeNodeDisableSchedulingOptions', + 'ComputeNodeEnableSchedulingOptions', + 'ComputeNodeGetRemoteLoginSettingsOptions', + 'ComputeNodeGetRemoteDesktopOptions', + 'ComputeNodeUploadBatchServiceLogsOptions', + 'ComputeNodeListOptions', + 'ApplicationSummaryPaged', + 'PoolUsageMetricsPaged', + 'CloudPoolPaged', + 'ImageInformationPaged', + 'PoolNodeCountsPaged', + 'CloudJobPaged', + 'JobPreparationAndReleaseTaskExecutionInformationPaged', + 'CertificatePaged', + 'NodeFilePaged', + 'CloudJobSchedulePaged', + 'CloudTaskPaged', + 'ComputeNodePaged', + 'OSType', + 'VerificationType', + 'AccessScope', + 'CertificateState', + 'CertificateFormat', + 
'ContainerWorkingDirectory', + 'JobAction', + 'DependencyAction', + 'AutoUserScope', + 'ElevationLevel', + 'LoginMode', + 'OutputFileUploadCondition', + 'ComputeNodeFillType', + 'CertificateStoreLocation', + 'CertificateVisibility', + 'CachingType', + 'StorageAccountType', + 'DynamicVNetAssignmentScope', + 'InboundEndpointProtocol', + 'NetworkSecurityGroupRuleAccess', + 'PoolLifetimeOption', + 'OnAllTasksComplete', + 'OnTaskFailure', + 'JobScheduleState', + 'ErrorCategory', + 'JobState', + 'JobPreparationTaskState', + 'TaskExecutionResult', + 'JobReleaseTaskState', + 'PoolState', + 'AllocationState', + 'TaskState', + 'TaskAddStatus', + 'SubtaskState', + 'StartTaskState', + 'ComputeNodeState', + 'SchedulingState', + 'DisableJobOption', + 'ComputeNodeDeallocationOption', + 'ComputeNodeRebootOption', + 'ComputeNodeReimageOption', + 'DisableComputeNodeSchedulingOption', +] diff --git a/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options.py b/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options.py new file mode 100644 index 00000000..4ad2da01 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. + :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 10) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options_py3.py new file mode 100644 index 00000000..e9f0d02b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/account_list_pool_node_counts_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListPoolNodeCountsOptions(Model): + """Additional parameters for list_pool_node_counts operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + Default value: 10 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=10, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListPoolNodeCountsOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options.py b/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options.py new file mode 100644 index 00000000..d6fdedf2 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListSupportedImagesOptions(Model): + """Additional parameters for list_supported_images operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(AccountListSupportedImagesOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options_py3.py new file mode 100644 index 00000000..35d60b10 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/account_list_supported_images_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AccountListSupportedImagesOptions(Model): + """Additional parameters for list_supported_images operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(AccountListSupportedImagesOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/affinity_information.py b/azext/generated/sdk/batch/v2019_08_01/models/affinity_information.py new file mode 100644 index 00000000..c63658ec --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/affinity_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a Compute + Node on which to start a Task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a Compute Node or a Task that has run previously. You can pass the + affinityId of a Node to indicate that this Task needs to run on that + Compute Node. Note that this is just a soft affinity. If the target + Compute Node is busy or unavailable at the time the Task is scheduled, + then the Task will be scheduled elsewhere. 
+ :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/affinity_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/affinity_information_py3.py new file mode 100644 index 00000000..98463bb2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/affinity_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AffinityInformation(Model): + """A locality hint that can be used by the Batch service to select a Compute + Node on which to start a Task. + + All required parameters must be populated in order to send to Azure. + + :param affinity_id: Required. An opaque string representing the location + of a Compute Node or a Task that has run previously. You can pass the + affinityId of a Node to indicate that this Task needs to run on that + Compute Node. Note that this is just a soft affinity. If the target + Compute Node is busy or unavailable at the time the Task is scheduled, + then the Task will be scheduled elsewhere. 
+ :type affinity_id: str + """ + + _validation = { + 'affinity_id': {'required': True}, + } + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str, **kwargs) -> None: + super(AffinityInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/application_get_options.py new file mode 100644 index 00000000..038c5421 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_get_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/application_get_options_py3.py new file mode 100644 index 00000000..3c9d5c0a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_get_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationGetOptions(Model): + """Additional parameters for get operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationGetOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/application_list_options.py new file mode 100644 index 00000000..bc3ddb36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_list_options.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. 
+ + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/application_list_options_py3.py new file mode 100644 index 00000000..445de51e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_list_options_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationListOptions(Model): + """Additional parameters for list operation. + + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 applications can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ApplicationListOptions, self).__init__(**kwargs) + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference.py b/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference.py new file mode 100644 index 00000000..52df2028 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an Package to be deployed to Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. If omitted, the + default version is deployed. 
If this is omitted on a Pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a Task, and no default version is specified for this + application, the Task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.version = kwargs.get('version', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference_py3.py new file mode 100644 index 00000000..0c034391 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_package_reference_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationPackageReference(Model): + """A reference to an Package to be deployed to Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param application_id: Required. The ID of the application to deploy. + :type application_id: str + :param version: The version of the application to deploy. 
If omitted, the + default version is deployed. If this is omitted on a Pool, and no default + version is specified for this application, the request fails with the + error code InvalidApplicationPackageReferences and HTTP status code 409. + If this is omitted on a Task, and no default version is specified for this + application, the Task fails with a pre-processing error. + :type version: str + """ + + _validation = { + 'application_id': {'required': True}, + } + + _attribute_map = { + 'application_id': {'key': 'applicationId', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, application_id: str, version: str=None, **kwargs) -> None: + super(ApplicationPackageReference, self).__init__(**kwargs) + self.application_id = application_id + self.version = version diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_summary.py b/azext/generated/sdk/batch/v2019_08_01/models/application_summary.py new file mode 100644 index 00000000..9f6bc127 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_summary.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationSummary(Model): + """Contains information about an application in an Azure Batch Account. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the application + within the Account. + :type id: str + :param display_name: Required. 
The display name for the application. + :type display_name: str + :param versions: Required. The list of available versions of the + application. + :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(ApplicationSummary, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.versions = kwargs.get('versions', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_summary_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/application_summary_paged.py new file mode 100644 index 00000000..64ed9c6b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/application_summary_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# --------------------------------------------------------------------------
+
+from msrest.paging import Paged
+
+
+class ApplicationSummaryPaged(Paged):
+    """
+    A paging container for iterating over a list of :class:`ApplicationSummary <azure.batch.models.ApplicationSummary>` object
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
+        'current_page': {'key': 'value', 'type': '[ApplicationSummary]'}
+    }
+
+    def __init__(self, *args, **kwargs):
+
+        super(ApplicationSummaryPaged, self).__init__(*args, **kwargs)
diff --git a/azext/generated/sdk/batch/v2019_08_01/models/application_summary_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/application_summary_py3.py
new file mode 100644
index 00000000..c2fa3677
--- /dev/null
+++ b/azext/generated/sdk/batch/v2019_08_01/models/application_summary_py3.py
@@ -0,0 +1,46 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from msrest.serialization import Model
+
+
+class ApplicationSummary(Model):
+    """Contains information about an application in an Azure Batch Account.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. A string that uniquely identifies the application
+     within the Account.
+    :type id: str
+    :param display_name: Required. The display name for the application.
+    :type display_name: str
+    :param versions: Required. The list of available versions of the
+     application.
+ :type versions: list[str] + """ + + _validation = { + 'id': {'required': True}, + 'display_name': {'required': True}, + 'versions': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'versions': {'key': 'versions', 'type': '[str]'}, + } + + def __init__(self, *, id: str, display_name: str, versions, **kwargs) -> None: + super(ApplicationSummary, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.versions = versions diff --git a/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings.py b/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings.py new file mode 100644 index 00000000..f3a552a6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the Task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the Job which + contains the Task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, **kwargs): + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = kwargs.get('access', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings_py3.py new file mode 100644 index 00000000..ee605c67 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/authentication_token_settings_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AuthenticationTokenSettings(Model): + """The settings for an authentication token that the Task can use to perform + Batch service operations. + + :param access: The Batch resources to which the token grants access. The + authentication token grants access to a limited set of Batch service + operations. Currently the only supported value for the access property is + 'job', which grants access to all operations related to the Job which + contains the Task. 
+ :type access: list[str or ~azure.batch.models.AccessScope] + """ + + _attribute_map = { + 'access': {'key': 'access', 'type': '[AccessScope]'}, + } + + def __init__(self, *, access=None, **kwargs) -> None: + super(AuthenticationTokenSettings, self).__init__(**kwargs) + self.access = access diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification.py new file mode 100644 index 00000000..2972c8cf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto Pool when the Job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a Pool is automatically created. The Batch service assigns each auto + Pool a unique identifier on creation. To distinguish between Pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto Pools, and how multiple Jobs on a schedule are assigned to Pools. 
+ Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto Pool alive after its lifetime + expires. If false, the Batch service deletes the Pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the Job or Job Schedule completes. If true, the Batch service does not + delete the Pool automatically. It is up to the user to delete auto Pools + created with this option. + :type keep_alive: bool + :param pool: The Pool specification for the auto Pool. + :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, **kwargs): + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = kwargs.get('auto_pool_id_prefix', None) + self.pool_lifetime_option = kwargs.get('pool_lifetime_option', None) + self.keep_alive = kwargs.get('keep_alive', None) + self.pool = kwargs.get('pool', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification_py3.py new file mode 100644 index 00000000..34b01b40 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_pool_specification_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoPoolSpecification(Model): + """Specifies characteristics for a temporary 'auto pool'. The Batch service + will create this auto Pool when the Job is submitted. + + All required parameters must be populated in order to send to Azure. + + :param auto_pool_id_prefix: A prefix to be added to the unique identifier + when a Pool is automatically created. The Batch service assigns each auto + Pool a unique identifier on creation. To distinguish between Pools created + for different purposes, you can specify this element to add a prefix to + the ID that is assigned. The prefix can be up to 20 characters long. + :type auto_pool_id_prefix: str + :param pool_lifetime_option: Required. The minimum lifetime of created + auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + Possible values include: 'jobSchedule', 'job' + :type pool_lifetime_option: str or ~azure.batch.models.PoolLifetimeOption + :param keep_alive: Whether to keep an auto Pool alive after its lifetime + expires. If false, the Batch service deletes the Pool once its lifetime + (as determined by the poolLifetimeOption setting) expires; that is, when + the Job or Job Schedule completes. If true, the Batch service does not + delete the Pool automatically. It is up to the user to delete auto Pools + created with this option. + :type keep_alive: bool + :param pool: The Pool specification for the auto Pool. 
+ :type pool: ~azure.batch.models.PoolSpecification + """ + + _validation = { + 'pool_lifetime_option': {'required': True}, + } + + _attribute_map = { + 'auto_pool_id_prefix': {'key': 'autoPoolIdPrefix', 'type': 'str'}, + 'pool_lifetime_option': {'key': 'poolLifetimeOption', 'type': 'PoolLifetimeOption'}, + 'keep_alive': {'key': 'keepAlive', 'type': 'bool'}, + 'pool': {'key': 'pool', 'type': 'PoolSpecification'}, + } + + def __init__(self, *, pool_lifetime_option, auto_pool_id_prefix: str=None, keep_alive: bool=None, pool=None, **kwargs) -> None: + super(AutoPoolSpecification, self).__init__(**kwargs) + self.auto_pool_id_prefix = auto_pool_id_prefix + self.pool_lifetime_option = pool_lifetime_option + self.keep_alive = keep_alive + self.pool = pool diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run.py new file mode 100644 index 00000000..28e11e8d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a Pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. 
Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. + :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the Pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = kwargs.get('timestamp', None) + self.results = kwargs.get('results', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error.py new file mode 100644 index 00000000..542fe623 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a Pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error_py3.py new file mode 100644 index 00000000..62b79622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_error_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRunError(Model): + """An error that occurred when executing or evaluating a Pool autoscale + formula. + + :param code: An identifier for the autoscale error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the autoscale error, intended to be + suitable for display in a user interface. 
+ :type message: str + :param values: A list of additional error details related to the autoscale + error. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_py3.py new file mode 100644 index 00000000..7607b04f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_scale_run_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoScaleRun(Model): + """The results and errors from an execution of a Pool autoscale formula. + + All required parameters must be populated in order to send to Azure. + + :param timestamp: Required. The time at which the autoscale formula was + last evaluated. + :type timestamp: datetime + :param results: The final values of all variables used in the evaluation + of the autoscale formula. Each variable value is returned in the form + $variable=value, and variables are separated by semicolons. 
+ :type results: str + :param error: Details of the error encountered evaluating the autoscale + formula on the Pool, if the evaluation was unsuccessful. + :type error: ~azure.batch.models.AutoScaleRunError + """ + + _validation = { + 'timestamp': {'required': True}, + } + + _attribute_map = { + 'timestamp': {'key': 'timestamp', 'type': 'iso-8601'}, + 'results': {'key': 'results', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, + } + + def __init__(self, *, timestamp, results: str=None, error=None, **kwargs) -> None: + super(AutoScaleRun, self).__init__(**kwargs) + self.timestamp = timestamp + self.results = results + self.error = error diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification.py new file mode 100644 index 00000000..7c0ac49b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a Task on the Batch + service. + + :param scope: The scope for the auto user. The default value is pool. If + the pool is running Windows a value of Task should be specified if + stricter isolation between tasks is required. 
For example, if the task + mutates the registry in a way which could impact other tasks, or if + certificates have been specified on the pool which should not be + accessible by normal tasks but should be accessible by StartTasks. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, **kwargs): + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = kwargs.get('scope', None) + self.elevation_level = kwargs.get('elevation_level', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification_py3.py new file mode 100644 index 00000000..6f4db407 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/auto_user_specification_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AutoUserSpecification(Model): + """Specifies the parameters for the auto user that runs a Task on the Batch + service. + + :param scope: The scope for the auto user. The default value is pool. 
If + the pool is running Windows a value of Task should be specified if + stricter isolation between tasks is required. For example, if the task + mutates the registry in a way which could impact other tasks, or if + certificates have been specified on the pool which should not be + accessible by normal tasks but should be accessible by StartTasks. + Possible values include: 'task', 'pool' + :type scope: str or ~azure.batch.models.AutoUserScope + :param elevation_level: The elevation level of the auto user. The default + value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + """ + + _attribute_map = { + 'scope': {'key': 'scope', 'type': 'AutoUserScope'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + } + + def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = scope + self.elevation_level = elevation_level diff --git a/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration.py new file mode 100644 index 00000000..5c59d343 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AzureBlobFileSystemConfiguration(Model): + """Information used to connect to an Azure Storage Container using Blobfuse. + + All required parameters must be populated in order to send to Azure. + + :param account_name: Required. The Azure Storage Account name. + :type account_name: str + :param container_name: Required. The Azure Blob Storage Container name. + :type container_name: str + :param account_key: The Azure Storage Account key. This property is + mutually exclusive with sasKey and one must be specified. + :type account_key: str + :param sas_key: The Azure Storage SAS token. This property is mutually + exclusive with accountKey and one must be specified. + :type sas_key: str + :param blobfuse_options: Additional command line options to pass to the + mount command. These are 'net use' options in Windows and 'mount' options + in Linux. + :type blobfuse_options: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
+ :type relative_mount_path: str + """ + + _validation = { + 'account_name': {'required': True}, + 'container_name': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'container_name': {'key': 'containerName', 'type': 'str'}, + 'account_key': {'key': 'accountKey', 'type': 'str'}, + 'sas_key': {'key': 'sasKey', 'type': 'str'}, + 'blobfuse_options': {'key': 'blobfuseOptions', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AzureBlobFileSystemConfiguration, self).__init__(**kwargs) + self.account_name = kwargs.get('account_name', None) + self.container_name = kwargs.get('container_name', None) + self.account_key = kwargs.get('account_key', None) + self.sas_key = kwargs.get('sas_key', None) + self.blobfuse_options = kwargs.get('blobfuse_options', None) + self.relative_mount_path = kwargs.get('relative_mount_path', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration_py3.py new file mode 100644 index 00000000..075c17da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/azure_blob_file_system_configuration_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AzureBlobFileSystemConfiguration(Model): + """Information used to connect to an Azure Storage Container using Blobfuse. + + All required parameters must be populated in order to send to Azure. + + :param account_name: Required. The Azure Storage Account name. + :type account_name: str + :param container_name: Required. The Azure Blob Storage Container name. + :type container_name: str + :param account_key: The Azure Storage Account key. This property is + mutually exclusive with sasKey and one must be specified. + :type account_key: str + :param sas_key: The Azure Storage SAS token. This property is mutually + exclusive with accountKey and one must be specified. + :type sas_key: str + :param blobfuse_options: Additional command line options to pass to the + mount command. These are 'net use' options in Windows and 'mount' options + in Linux. + :type blobfuse_options: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
+ :type relative_mount_path: str + """ + + _validation = { + 'account_name': {'required': True}, + 'container_name': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'container_name': {'key': 'containerName', 'type': 'str'}, + 'account_key': {'key': 'accountKey', 'type': 'str'}, + 'sas_key': {'key': 'sasKey', 'type': 'str'}, + 'blobfuse_options': {'key': 'blobfuseOptions', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + } + + def __init__(self, *, account_name: str, container_name: str, relative_mount_path: str, account_key: str=None, sas_key: str=None, blobfuse_options: str=None, **kwargs) -> None: + super(AzureBlobFileSystemConfiguration, self).__init__(**kwargs) + self.account_name = account_name + self.container_name = container_name + self.account_key = account_key + self.sas_key = sas_key + self.blobfuse_options = blobfuse_options + self.relative_mount_path = relative_mount_path diff --git a/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration.py new file mode 100644 index 00000000..a917e6fe --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AzureFileShareConfiguration(Model): + """Information used to connect to an Azure Fileshare. + + All required parameters must be populated in order to send to Azure. + + :param account_name: Required. The Azure Storage account name. + :type account_name: str + :param azure_file_url: Required. The Azure Files URL. This is of the form + 'https://{account}.file.core.windows.net/'. + :type azure_file_url: str + :param account_key: Required. The Azure Storage account key. + :type account_key: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. 
+ :type mount_options: str + """ + + _validation = { + 'account_name': {'required': True}, + 'azure_file_url': {'required': True}, + 'account_key': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'}, + 'account_key': {'key': 'accountKey', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AzureFileShareConfiguration, self).__init__(**kwargs) + self.account_name = kwargs.get('account_name', None) + self.azure_file_url = kwargs.get('azure_file_url', None) + self.account_key = kwargs.get('account_key', None) + self.relative_mount_path = kwargs.get('relative_mount_path', None) + self.mount_options = kwargs.get('mount_options', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration_py3.py new file mode 100644 index 00000000..cee1d99b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/azure_file_share_configuration_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AzureFileShareConfiguration(Model): + """Information used to connect to an Azure Fileshare. + + All required parameters must be populated in order to send to Azure. 
+ + :param account_name: Required. The Azure Storage account name. + :type account_name: str + :param azure_file_url: Required. The Azure Files URL. This is of the form + 'https://{account}.file.core.windows.net/'. + :type azure_file_url: str + :param account_key: Required. The Azure Storage account key. + :type account_key: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. + :type mount_options: str + """ + + _validation = { + 'account_name': {'required': True}, + 'azure_file_url': {'required': True}, + 'account_key': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'azure_file_url': {'key': 'azureFileUrl', 'type': 'str'}, + 'account_key': {'key': 'accountKey', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + } + + def __init__(self, *, account_name: str, azure_file_url: str, account_key: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None: + super(AzureFileShareConfiguration, self).__init__(**kwargs) + self.account_name = account_name + self.azure_file_url = azure_file_url + self.account_key = account_key + self.relative_mount_path = relative_mount_path + self.mount_options = mount_options diff --git a/azext/generated/sdk/batch/v2019_08_01/models/batch_error.py b/azext/generated/sdk/batch/v2019_08_01/models/batch_error.py new file mode 100644 index 00000000..3857ac96 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/batch_error.py @@ 
-0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. + :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, **kwargs): + super(BatchError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. 
+ """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail.py b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail.py new file mode 100644 index 00000000..a892678c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail_py3.py new file mode 100644 index 00000000..8aa8a85b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_detail_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BatchErrorDetail(Model): + """An item of additional information included in an Azure Batch error + response. + + :param key: An identifier specifying the meaning of the Value property. + :type key: str + :param value: The additional information included with the error response. 
+ :type value: str + """ + + _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + super(BatchErrorDetail, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/batch_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_py3.py new file mode 100644 index 00000000..a6e49569 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/batch_error_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class BatchError(Model): + """An error response received from the Azure Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: ~azure.batch.models.ErrorMessage + :param values: A collection of key-value pairs containing additional + details about the error. 
+ :type values: list[~azure.batch.models.BatchErrorDetail] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'ErrorMessage'}, + 'values': {'key': 'values', 'type': '[BatchErrorDetail]'}, + } + + def __init__(self, *, code: str=None, message=None, values=None, **kwargs) -> None: + super(BatchError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values + + +class BatchErrorException(HttpOperationError): + """Server responsed with exception of type: 'BatchError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(BatchErrorException, self).__init__(deserialize, response, 'BatchError', *args) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/batch_service_client_enums.py b/azext/generated/sdk/batch/v2019_08_01/models/batch_service_client_enums.py new file mode 100644 index 00000000..595aebd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/batch_service_client_enums.py @@ -0,0 +1,300 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + + +class OSType(str, Enum): + + linux = "linux" #: The Linux operating system. + windows = "windows" #: The Windows operating system. + + +class VerificationType(str, Enum): + + verified = "verified" #: The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected. 
+ unverified = "unverified" #: The associated Compute Node agent SKU should have binary compatibility with the Image, but specific functionality has not been verified. + + +class AccessScope(str, Enum): + + job = "job" #: Grants access to perform all operations on the Job containing the Task. + + +class CertificateState(str, Enum): + + active = "active" #: The Certificate is available for use in Pools. + deleting = "deleting" #: The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools. + delete_failed = "deletefailed" #: The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete. + + +class CertificateFormat(str, Enum): + + pfx = "pfx" #: The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain. + cer = "cer" #: The Certificate is a base64-encoded X.509 Certificate. + + +class ContainerWorkingDirectory(str, Enum): + + task_working_directory = "taskWorkingDirectory" #: Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch. + container_image_default = "containerImageDefault" #: Use the working directory defined in the container Image. Beware that this directory will not contain the Resource Files downloaded by Batch. + + +class JobAction(str, Enum): + + none = "none" #: Take no action. + disable = "disable" #: Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue. 
+ terminate = "terminate" #: Terminate the Job. The terminateReason in the Job's executionInfo is set to "TaskFailed". + + +class DependencyAction(str, Enum): + + satisfy = "satisfy" #: Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be scheduled to run. + block = "block" #: Blocks tasks waiting on this task, preventing them from being scheduled. + + +class AutoUserScope(str, Enum): + + task = "task" #: Specifies that the service should create a new user for the Task. + pool = "pool" #: Specifies that the Task runs as the common auto user Account which is created on every Compute Node in a Pool. + + +class ElevationLevel(str, Enum): + + non_admin = "nonadmin" #: The user is a standard user without elevated access. + admin = "admin" #: The user is a user with elevated access and operates with full Administrator permissions. + + +class LoginMode(str, Enum): + + batch = "batch" #: The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. + interactive = "interactive" #: The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed by the Task command line is configured to always require administrative privilege or to always require maximum privilege. + + +class OutputFileUploadCondition(str, Enum): + + task_success = "tasksuccess" #: Upload the file(s) only after the Task process exits with an exit code of 0. + task_failure = "taskfailure" #: Upload the file(s) only after the Task process exits with a nonzero exit code. + task_completion = "taskcompletion" #: Upload the file(s) after the Task process exits, no matter what the exit code was. 
+ + +class ComputeNodeFillType(str, Enum): + + spread = "spread" #: Tasks should be assigned evenly across all Compute Nodes in the Pool. + pack = "pack" #: As many Tasks as possible (maxTasksPerNode) should be assigned to each Compute Node in the Pool before any Tasks are assigned to the next Compute Node in the Pool. + + +class CertificateStoreLocation(str, Enum): + + current_user = "currentuser" #: Certificates should be installed to the CurrentUser Certificate store. + local_machine = "localmachine" #: Certificates should be installed to the LocalMachine Certificate store. + + +class CertificateVisibility(str, Enum): + + start_task = "starttask" #: The Certificate should be visible to the user account under which the StartTask is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well. + task = "task" #: The Certificate should be visible to the user accounts under which Job Tasks are run. + remote_user = "remoteuser" #: The Certificate should be visible to the user accounts under which users remotely access the Compute Node. + + +class CachingType(str, Enum): + + none = "none" #: The caching mode for the disk is not enabled. + read_only = "readonly" #: The caching mode for the disk is read only. + read_write = "readwrite" #: The caching mode for the disk is read and write. + + +class StorageAccountType(str, Enum): + + standard_lrs = "standard_lrs" #: The data disk should use standard locally redundant storage. + premium_lrs = "premium_lrs" #: The data disk should use premium locally redundant storage. + + +class DynamicVNetAssignmentScope(str, Enum): + + none = "none" #: No dynamic VNet assignment is enabled. + job = "job" #: Dynamic VNet assignment is done per-job. + + +class InboundEndpointProtocol(str, Enum): + + tcp = "tcp" #: Use TCP for the endpoint. + udp = "udp" #: Use UDP for the endpoint. + + +class NetworkSecurityGroupRuleAccess(str, Enum): + + allow = "allow" #: Allow access. 
+ deny = "deny" #: Deny access. + + +class PoolLifetimeOption(str, Enum): + + job_schedule = "jobschedule" #: The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when it creates the first Job on the schedule. You may apply this option only to Job Schedules, not to Jobs. + job = "job" #: The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates the Pool when it creates the Job. If the 'job' option is applied to a Job Schedule, the Batch service creates a new auto Pool for every Job created on the schedule. + + +class OnAllTasksComplete(str, Enum): + + no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. + terminate_job = "terminatejob" #: Terminate the Job. The Job's terminateReason is set to 'AllTasksComplete'. + + +class OnTaskFailure(str, Enum): + + no_action = "noaction" #: Do nothing. The Job remains active unless terminated or disabled by some other means. + perform_exit_options_job_action = "performexitoptionsjobaction" #: Take the action associated with the Task exit condition in the Task's exitConditions collection. (This may still result in no action being taken, if that is what the Task specifies.) + + +class JobScheduleState(str, Enum): + + active = "active" #: The Job Schedule is active and will create Jobs as per its schedule. + completed = "completed" #: The Job Schedule has terminated, either by reaching its end time or by the user terminating it explicitly. + disabled = "disabled" #: The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs will on this schedule, but any existing active Job will continue to run. + terminating = "terminating" #: The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, nor is any existing Job active. 
+ deleting = "deleting" #: The user has requested that the Job Schedule be deleted, but the delete operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, and will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted. + + +class ErrorCategory(str, Enum): + + user_error = "usererror" #: The error is due to a user issue, such as misconfiguration. + server_error = "servererror" #: The error is due to an internal server issue. + + +class JobState(str, Enum): + + active = "active" #: The Job is available to have Tasks scheduled. + disabling = "disabling" #: A user has requested that the Job be disabled, but the disable operation is still in progress (for example, waiting for Tasks to terminate). + disabled = "disabled" #: A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled. + enabling = "enabling" #: A user has requested that the Job be enabled, but the enable operation is still in progress. + terminating = "terminating" #: The Job is about to complete, either because a Job Manager Task has completed or because the user has terminated the Job, but the terminate operation is still in progress (for example, because Job Release Tasks are running). + completed = "completed" #: All Tasks have terminated, and the system will not accept any more Tasks or any further changes to the Job. + deleting = "deleting" #: A user has requested that the Job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running Tasks). + + +class JobPreparationTaskState(str, Enum): + + running = "running" #: The Task is currently running (including retrying). 
+ completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). + + +class TaskExecutionResult(str, Enum): + + success = "success" #: The Task ran successfully. + failure = "failure" #: There was an error during processing of the Task. The failure may have occurred before the Task process was launched, while the Task process was executing, or after the Task process exited. + + +class JobReleaseTaskState(str, Enum): + + running = "running" #: The Task is currently running (including retrying). + completed = "completed" #: The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures). + + +class PoolState(str, Enum): + + active = "active" #: The Pool is available to run Tasks subject to the availability of Compute Nodes. + deleting = "deleting" #: The user has requested that the Pool be deleted, but the delete operation has not yet completed. + + +class AllocationState(str, Enum): + + steady = "steady" #: The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes. + resizing = "resizing" #: The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool. + stopping = "stopping" #: The Pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed. + + +class TaskState(str, Enum): + + active = "active" #: The Task is queued and able to run, but is not currently assigned to a Compute Node. 
A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run. + preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + running = "running" #: The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. + + +class TaskAddStatus(str, Enum): + + success = "success" #: The Task was added successfully. + client_error = "clienterror" #: The Task failed to add due to a client error and should not be retried without modifying the request as appropriate. + server_error = "servererror" #: Task failed to add due to a server error and can be retried without modification. + + +class SubtaskState(str, Enum): + + preparing = "preparing" #: The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + running = "running" #: The Task is running on a Compute Node. 
This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + completed = "completed" #: The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. + + +class StartTaskState(str, Enum): + + running = "running" #: The StartTask is currently running. + completed = "completed" #: The StartTask has exited with exit code 0, or the StartTask has failed and the retry limit has reached, or the StartTask process did not run due to Task preparation errors (such as resource file download failures). + + +class ComputeNodeState(str, Enum): + + idle = "idle" #: The Compute Node is not currently running a Task. + rebooting = "rebooting" #: The Compute Node is rebooting. + reimaging = "reimaging" #: The Compute Node is reimaging. + running = "running" #: The Compute Node is running one or more Tasks (other than a StartTask). + unusable = "unusable" #: The Compute Node cannot be used for Task execution due to errors. + creating = "creating" #: The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the Pool. + starting = "starting" #: The Batch service is starting on the underlying virtual machine. + waiting_for_start_task = "waitingforstarttask" #: The StartTask has started running on the Compute Node, but waitForSuccess is set and the StartTask has not yet completed. + start_task_failed = "starttaskfailed" #: The StartTask has failed on the Compute Node (and exhausted all retries), and waitForSuccess is set. The Compute Node is not usable for running Tasks. 
+ unknown = "unknown" #: The Batch service has lost contact with the Compute Node, and does not know its true state. + leaving_pool = "leavingpool" #: The Compute Node is leaving the Pool, either because the user explicitly removed it or because the Pool is resizing or autoscaling down. + offline = "offline" #: The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute Node is disabled. + preempted = "preempted" #: The low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + + +class SchedulingState(str, Enum): + + enabled = "enabled" #: Tasks can be scheduled on the Compute Node. + disabled = "disabled" #: No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node may still run to completion. All Compute Nodes start with scheduling enabled. + + +class DisableJobOption(str, Enum): + + requeue = "requeue" #: Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. + wait = "wait" #: Allow currently running Tasks to complete. + + +class ComputeNodeDeallocationOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Remove Compute Nodes as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute Nodes when all Tasks have completed. 
+ retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have expired. + + +class ComputeNodeRebootOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed. + retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all Task retention periods have expired. + + +class ComputeNodeReimageOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the Compute Node when all Tasks have completed. + retained_data = "retaineddata" #: Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. 
Reimage the Compute Node when all Task retention periods have expired. + + +class DisableComputeNodeSchedulingOption(str, Enum): + + requeue = "requeue" #: Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state as soon as Tasks have been terminated. + terminate = "terminate" #: Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as Tasks have been terminated. + task_completion = "taskcompletion" #: Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline state when all Tasks have completed. diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate.py new file mode 100644 index 00000000..b51ca681 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the Certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. 
+ :type thumbprint_algorithm: str + :param url: The URL of the Certificate. + :type url: str + :param state: The current state of the Certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the Certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Certificate. This + property is not set if the Certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the Certificate + entered its previous state. This property is not set if the Certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the Certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this Certificate. This property is set only if the + Certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, **kwargs): + super(Certificate, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.public_data = kwargs.get('public_data', None) + self.delete_certificate_error = kwargs.get('delete_certificate_error', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options.py new file mode 100644 index 00000000..f2c8d5bb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options_py3.py new file mode 100644 index 00000000..c7d61b36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter.py new file mode 100644 index 00000000..497ecad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The X.509 thumbprint of the Certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the Certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the Certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the Certificate's private key. + This is required if the Certificate format is pfx. It should be omitted if + the Certificate format is cer. 
+ :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.data = kwargs.get('data', None) + self.certificate_format = kwargs.get('certificate_format', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter_py3.py new file mode 100644 index 00000000..70c3f6c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_add_parameter_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateAddParameter(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. 
The X.509 thumbprint of the Certificate. This + is a sequence of up to 40 hex digits (it may include spaces but these are + removed). + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm used to derive the + thumbprint. This must be sha1. + :type thumbprint_algorithm: str + :param data: Required. The base64-encoded contents of the Certificate. The + maximum size is 10KB. + :type data: str + :param certificate_format: The format of the Certificate data. Possible + values include: 'pfx', 'cer' + :type certificate_format: str or ~azure.batch.models.CertificateFormat + :param password: The password to access the Certificate's private key. + This is required if the Certificate format is pfx. It should be omitted if + the Certificate format is cer. + :type password: str + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'data': {'key': 'data', 'type': 'str'}, + 'certificate_format': {'key': 'certificateFormat', 'type': 'CertificateFormat'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, data: str, certificate_format=None, password: str=None, **kwargs) -> None: + super(CertificateAddParameter, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.data = data + self.certificate_format = certificate_format + self.password = password diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options.py new file mode 100644 index 00000000..5c7c936c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options.py @@ -0,0 +1,46 @@ 
+# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options_py3.py new file mode 100644 index 00000000..8afbcf24 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_cancel_deletion_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateCancelDeletionOptions(Model): + """Additional parameters for cancel_deletion operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateCancelDeletionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options.py new file mode 100644 index 00000000..5ff7ee83 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options_py3.py new file mode 100644 index 00000000..47f91b10 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_delete_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options.py new file mode 100644 index 00000000..2b474c17 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options_py3.py new file mode 100644 index 00000000..4bd6bb70 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateGetOptions(Model): + """Additional parameters for get operation. 
+ + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options.py new file mode 100644 index 00000000..39c31a47 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options_py3.py new file mode 100644 index 00000000..d98edb44 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Certificates can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(CertificateListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_paged.py 
b/azext/generated/sdk/batch/v2019_08_01/models/certificate_paged.py new file mode 100644 index 00000000..985d7838 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CertificatePaged(Paged): + """ + A paging container for iterating over a list of :class:`Certificate <azure.batch.models.Certificate>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[Certificate]'} + } + + def __init__(self, *args, **kwargs): + + super(CertificatePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_py3.py new file mode 100644 index 00000000..cd64a868 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Certificate(Model): + """A Certificate that can be installed on Compute Nodes and can be used to + authenticate operations on the machine. + + :param thumbprint: The X.509 thumbprint of the Certificate. This is a + sequence of up to 40 hex digits. + :type thumbprint: str + :param thumbprint_algorithm: The algorithm used to derive the thumbprint. + :type thumbprint_algorithm: str + :param url: The URL of the Certificate. + :type url: str + :param state: The current state of the Certificate. Possible values + include: 'active', 'deleting', 'deleteFailed' + :type state: str or ~azure.batch.models.CertificateState + :param state_transition_time: The time at which the Certificate entered + its current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Certificate. This + property is not set if the Certificate is in its initial active state. + Possible values include: 'active', 'deleting', 'deleteFailed' + :type previous_state: str or ~azure.batch.models.CertificateState + :param previous_state_transition_time: The time at which the Certificate + entered its previous state. This property is not set if the Certificate is + in its initial Active state. + :type previous_state_transition_time: datetime + :param public_data: The public part of the Certificate as a base-64 + encoded .cer file. + :type public_data: str + :param delete_certificate_error: The error that occurred on the last + attempt to delete this Certificate. This property is set only if the + Certificate is in the DeleteFailed state. 
+ :type delete_certificate_error: ~azure.batch.models.DeleteCertificateError + """ + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'CertificateState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'CertificateState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, *, thumbprint: str=None, thumbprint_algorithm: str=None, url: str=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, public_data: str=None, delete_certificate_error=None, **kwargs) -> None: + super(Certificate, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.url = url + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.public_data = public_data + self.delete_certificate_error = delete_certificate_error diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference.py new file mode 100644 index 00000000..dd759a08 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a Certificate to be installed on Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the Certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the Certificate store on the + Compute Node into which to install the Certificate. The default value is + currentuser. This property is applicable only for Pools configured with + Windows Compute Nodes (that is, created with cloudServiceConfiguration, or + with virtualMachineConfiguration using a Windows Image reference). For + Linux Compute Nodes, the Certificates are stored in a directory inside the + Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the Certificate store on the Compute Node + into which to install the Certificate. This property is applicable only + for Pools configured with Windows Compute Nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows Image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user Accounts on the Compute Node should have + access to the private data of the Certificate. You can specify more than + one visibility in this collection. The default is all Accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, **kwargs): + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = kwargs.get('thumbprint', None) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.store_location = kwargs.get('store_location', None) + self.store_name = kwargs.get('store_name', None) + self.visibility = kwargs.get('visibility', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference_py3.py new file mode 100644 index 00000000..d125e9e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/certificate_reference_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CertificateReference(Model): + """A reference to a Certificate to be installed on Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param thumbprint: Required. The thumbprint of the Certificate. + :type thumbprint: str + :param thumbprint_algorithm: Required. The algorithm with which the + thumbprint is associated. This must be sha1. + :type thumbprint_algorithm: str + :param store_location: The location of the Certificate store on the + Compute Node into which to install the Certificate. The default value is + currentuser. This property is applicable only for Pools configured with + Windows Compute Nodes (that is, created with cloudServiceConfiguration, or + with virtualMachineConfiguration using a Windows Image reference). For + Linux Compute Nodes, the Certificates are stored in a directory inside the + Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + Possible values include: 'currentUser', 'localMachine' + :type store_location: str or ~azure.batch.models.CertificateStoreLocation + :param store_name: The name of the Certificate store on the Compute Node + into which to install the Certificate. This property is applicable only + for Pools configured with Windows Compute Nodes (that is, created with + cloudServiceConfiguration, or with virtualMachineConfiguration using a + Windows Image reference). 
Common store names include: My, Root, CA, Trust, + Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but + any custom store name can also be used. The default value is My. + :type store_name: str + :param visibility: Which user Accounts on the Compute Node should have + access to the private data of the Certificate. You can specify more than + one visibility in this collection. The default is all Accounts. + :type visibility: list[str or ~azure.batch.models.CertificateVisibility] + """ + + _validation = { + 'thumbprint': {'required': True}, + 'thumbprint_algorithm': {'required': True}, + } + + _attribute_map = { + 'thumbprint': {'key': 'thumbprint', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'thumbprintAlgorithm', 'type': 'str'}, + 'store_location': {'key': 'storeLocation', 'type': 'CertificateStoreLocation'}, + 'store_name': {'key': 'storeName', 'type': 'str'}, + 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, + } + + def __init__(self, *, thumbprint: str, thumbprint_algorithm: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: + super(CertificateReference, self).__init__(**kwargs) + self.thumbprint = thumbprint + self.thumbprint_algorithm = thumbprint_algorithm + self.store_location = store_location + self.store_name = store_name + self.visibility = visibility diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration.py new file mode 100644 index 00000000..3d2e5720 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CIFSMountConfiguration(Model): + """Information used to connect to a CIFS file system. + + All required parameters must be populated in order to send to Azure. + + :param username: Required. The user to use for authentication against the + CIFS file system. + :type username: str + :param source: Required. The URI of the file system to mount. + :type source: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. + :type mount_options: str + :param password: Required. The password to use for authentication against + the CIFS file system. 
+ :type password: str + """ + + _validation = { + 'username': {'required': True}, + 'source': {'required': True}, + 'relative_mount_path': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'username': {'key': 'username', 'type': 'str'}, + 'source': {'key': 'source', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CIFSMountConfiguration, self).__init__(**kwargs) + self.username = kwargs.get('username', None) + self.source = kwargs.get('source', None) + self.relative_mount_path = kwargs.get('relative_mount_path', None) + self.mount_options = kwargs.get('mount_options', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration_py3.py new file mode 100644 index 00000000..bc005ed0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cifs_mount_configuration_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CIFSMountConfiguration(Model): + """Information used to connect to a CIFS file system. + + All required parameters must be populated in order to send to Azure. + + :param username: Required. The user to use for authentication against the + CIFS file system. 
+ :type username: str + :param source: Required. The URI of the file system to mount. + :type source: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. + :type mount_options: str + :param password: Required. The password to use for authentication against + the CIFS file system. + :type password: str + """ + + _validation = { + 'username': {'required': True}, + 'source': {'required': True}, + 'relative_mount_path': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'username': {'key': 'username', 'type': 'str'}, + 'source': {'key': 'source', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, username: str, source: str, relative_mount_path: str, password: str, mount_options: str=None, **kwargs) -> None: + super(CIFSMountConfiguration, self).__init__(**kwargs) + self.username = username + self.source = source + self.relative_mount_path = relative_mount_path + self.mount_options = mount_options + self.password = password diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_job.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job.py new file mode 100644 index 00000000..a2b73818 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job.py @@ -0,0 +1,164 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch Job. + + :param id: A string that uniquely identifies the Job within the Account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the Job. + :type url: str + :param e_tag: The ETag of the Job. This is an opaque string. You can use + it to detect whether the Job has changed between requests. In particular, + you can pass the ETag when updating a Job to specify that your changes + should take effect only if nobody else has modified the Job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job. This is the last + time at which the Job level data, such as the Job state or priority, + changed. It does not factor in task-level changes such as adding new Tasks + or Tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Job. + :type creation_time: datetime + :param state: The current state of the Job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the Job entered its + current state.
+ :type state_transition_time: datetime + :param previous_state: The previous state of the Job. This property is not + set if the Job is in its initial Active state. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the Job entered + its previous state. This property is not set if the Job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. The Job Preparation + Task is a special Task run on each Compute Node before any other Task of + the Job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. The Job Release Task is a + special Task run at the end of the Job on each Compute Node that has run + any other Task of the Job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. 
+ :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The Pool settings associated with the Job. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if it has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the Job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + Job. This property is populated only if the CloudJob was retrieved with an + expand clause including the 'stats' attribute; otherwise it is null. The + statistics may not be immediately available. The Batch service performs + periodic roll-up of statistics. The typical delay is about 30 minutes.
+ :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, **kwargs): + super(CloudJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.url = kwargs.get('url', 
None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.creation_time = kwargs.get('creation_time', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.metadata = kwargs.get('metadata', None) + self.execution_info = kwargs.get('execution_info', None) + self.stats = kwargs.get('stats', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_paged.py new file mode 100644 index 00000000..c642458f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudJobPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudJob <azure.batch.models.CloudJob>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudJob]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudJobPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_py3.py new file mode 100644 index 00000000..fc07528a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_job_py3.py @@ -0,0 +1,164 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudJob(Model): + """An Azure Batch Job. + + :param id: A string that uniquely identifies the Job within the Account. + The ID is case-preserving and case-insensitive (that is, you may not have + two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param url: The URL of the Job. + :type url: str + :param e_tag: The ETag of the Job. This is an opaque string. You can use + it to detect whether the Job has changed between requests.
In particular, + you can pass the ETag when updating a Job to specify that your changes + should take effect only if nobody else has modified the Job in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Job. This is the last + time at which the Job level data, such as the Job state or priority, + changed. It does not factor in task-level changes such as adding new Tasks + or Tasks changing state. + :type last_modified: datetime + :param creation_time: The creation time of the Job. + :type creation_time: datetime + :param state: The current state of the Job. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type state: str or ~azure.batch.models.JobState + :param state_transition_time: The time at which the Job entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Job. This property is not + set if the Job is in its initial Active state. Possible values include: + 'active', 'disabling', 'disabled', 'enabling', 'terminating', 'completed', + 'deleting' + :type previous_state: str or ~azure.batch.models.JobState + :param previous_state_transition_time: The time at which the Job entered + its previous state. This property is not set if the Job is in its initial + Active state. + :type previous_state_transition_time: datetime + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task.
The Job Preparation + Task is a special Task run on each Compute Node before any other Task of + the Job. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. The Job Release Task is a + special Task run at the end of the Job on each Compute Node that has run + any other Task of the Job. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: The Pool settings associated with the Job. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. The default is + noaction. Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if it has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job.
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param execution_info: The execution information for the Job. + :type execution_info: ~azure.batch.models.JobExecutionInformation + :param stats: Resource usage statistics for the entire lifetime of the + Job. This property is populated only if the CloudJob was retrieved with an + expand clause including the 'stats' attribute; otherwise it is null. The + statistics may not be immediately available. The Batch service performs + periodic roll-up of statistics. The typical delay is about 30 minutes. + :type stats: ~azure.batch.models.JobStatistics + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'JobState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 
'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, + 'stats': {'key': 'stats', 'type': 'JobStatistics'}, + } + + def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: + super(CloudJob, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.network_configuration = 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudJobSchedule(Model):
    """A Job Schedule that allows recurring Jobs by specifying when to run
    Jobs and a specification used to create each Job.

    :param id: A string that uniquely identifies the schedule within the
     Account.
    :type id: str
    :param display_name: The display name for the schedule.
    :type display_name: str
    :param url: The URL of the Job Schedule.
    :type url: str
    :param e_tag: The ETag of the Job Schedule. This is an opaque string you
     can use to detect whether the Job Schedule has changed between requests.
     In particular, you can pass the ETag with an Update Job Schedule request
     so that your changes take effect only if nobody else has modified the
     schedule in the meantime.
    :type e_tag: str
    :param last_modified: The last time at which schedule-level data, such as
     the Job specification or recurrence information, changed. Job-level
     changes (new Jobs, Job state transitions) are not reflected here.
    :type last_modified: datetime
    :param creation_time: The creation time of the Job Schedule.
    :type creation_time: datetime
    :param state: The current state of the Job Schedule. Possible values
     include: 'active', 'completed', 'disabled', 'terminating', 'deleting'
    :type state: str or ~azure.batch.models.JobScheduleState
    :param state_transition_time: The time at which the Job Schedule entered
     the current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the Job Schedule. Not
     present while the schedule is in its initial active state. Possible
     values include: 'active', 'completed', 'disabled', 'terminating',
     'deleting'
    :type previous_state: str or ~azure.batch.models.JobScheduleState
    :param previous_state_transition_time: The time at which the Job Schedule
     entered its previous state. Not present while the schedule is in its
     initial active state.
    :type previous_state_transition_time: datetime
    :param schedule: The schedule according to which Jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the Jobs to be created on this
     schedule.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param execution_info: Information about Jobs that have been and will be
     run under this schedule.
    :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation
    :param metadata: A list of name-value pairs associated with the schedule
     as metadata; the Batch service assigns no meaning to it.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: The lifetime resource usage statistics for the Job
     Schedule. Statistics are rolled up periodically and may be delayed by
     about 30 minutes.
    :type stats: ~azure.batch.models.JobScheduleStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }

    def __init__(self, **kwargs):
        super(CloudJobSchedule, self).__init__(**kwargs)
        # Every mapped attribute defaults to None unless supplied by the
        # caller; iterating the attribute map keeps this in sync with it.
        for _attr in self._attribute_map:
            setattr(self, _attr, kwargs.get(_attr, None))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class CloudJobSchedulePaged(Paged):
    """A paging container for iterating over a list of
    :class:`CloudJobSchedule` objects.
    """

    _attribute_map = {
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[CloudJobSchedule]'},
    }

    def __init__(self, *args, **kwargs):
        super(CloudJobSchedulePaged, self).__init__(*args, **kwargs)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudJobSchedule(Model):
    """A Job Schedule that allows recurring Jobs by specifying when to run
    Jobs and a specification used to create each Job.

    :param id: A string that uniquely identifies the schedule within the
     Account.
    :type id: str
    :param display_name: The display name for the schedule.
    :type display_name: str
    :param url: The URL of the Job Schedule.
    :type url: str
    :param e_tag: The ETag of the Job Schedule. This is an opaque string you
     can use to detect whether the Job Schedule has changed between requests.
     In particular, you can pass the ETag with an Update Job Schedule request
     so that your changes take effect only if nobody else has modified the
     schedule in the meantime.
    :type e_tag: str
    :param last_modified: The last time at which schedule-level data, such as
     the Job specification or recurrence information, changed. Job-level
     changes (new Jobs, Job state transitions) are not reflected here.
    :type last_modified: datetime
    :param creation_time: The creation time of the Job Schedule.
    :type creation_time: datetime
    :param state: The current state of the Job Schedule. Possible values
     include: 'active', 'completed', 'disabled', 'terminating', 'deleting'
    :type state: str or ~azure.batch.models.JobScheduleState
    :param state_transition_time: The time at which the Job Schedule entered
     the current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the Job Schedule. Not
     present while the schedule is in its initial active state. Possible
     values include: 'active', 'completed', 'disabled', 'terminating',
     'deleting'
    :type previous_state: str or ~azure.batch.models.JobScheduleState
    :param previous_state_transition_time: The time at which the Job Schedule
     entered its previous state. Not present while the schedule is in its
     initial active state.
    :type previous_state_transition_time: datetime
    :param schedule: The schedule according to which Jobs will be created.
    :type schedule: ~azure.batch.models.Schedule
    :param job_specification: The details of the Jobs to be created on this
     schedule.
    :type job_specification: ~azure.batch.models.JobSpecification
    :param execution_info: Information about Jobs that have been and will be
     run under this schedule.
    :type execution_info: ~azure.batch.models.JobScheduleExecutionInformation
    :param metadata: A list of name-value pairs associated with the schedule
     as metadata; the Batch service assigns no meaning to it.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: The lifetime resource usage statistics for the Job
     Schedule. Statistics are rolled up periodically and may be delayed by
     about 30 minutes.
    :type stats: ~azure.batch.models.JobScheduleStatistics
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobScheduleState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'JobScheduleState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'schedule': {'key': 'schedule', 'type': 'Schedule'},
        'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'},
        'execution_info': {'key': 'executionInfo', 'type': 'JobScheduleExecutionInformation'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'JobScheduleStatistics'},
    }

    def __init__(self, *, id: str=None, display_name: str=None, url: str=None,
                 e_tag: str=None, last_modified=None, creation_time=None,
                 state=None, state_transition_time=None, previous_state=None,
                 previous_state_transition_time=None, schedule=None,
                 job_specification=None, execution_info=None, metadata=None,
                 stats=None, **kwargs) -> None:
        super(CloudJobSchedule, self).__init__(**kwargs)
        # Identity and service-side bookkeeping.
        self.id = id
        self.display_name = display_name
        self.url = url
        self.e_tag = e_tag
        self.last_modified = last_modified
        self.creation_time = creation_time
        # State machine fields.
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
        # Scheduling payload and reporting.
        self.schedule = schedule
        self.job_specification = job_specification
        self.execution_info = execution_info
        self.metadata = metadata
        self.stats = stats
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudPool(Model):
    """A Pool in the Azure Batch service.

    :param id: A string that uniquely identifies the Pool within the Account
     (alphanumeric plus hyphens/underscores, at most 64 characters,
     case-preserving and case-insensitive).
    :type id: str
    :param display_name: The display name for the Pool (need not be unique,
     up to 1024 Unicode characters).
    :type display_name: str
    :param url: The URL of the Pool.
    :type url: str
    :param e_tag: The ETag of the Pool, an opaque string for optimistic
     concurrency: pass it when updating a Pool so your changes take effect
     only if nobody else has modified the Pool in the meantime.
    :type e_tag: str
    :param last_modified: The last time Pool-level data changed (node-level
     changes are not reflected).
    :type last_modified: datetime
    :param creation_time: The creation time of the Pool.
    :type creation_time: datetime
    :param state: The current state of the Pool. Possible values include:
     'active', 'deleting'
    :type state: str or ~azure.batch.models.PoolState
    :param state_transition_time: When the Pool entered its current state.
    :type state_transition_time: datetime
    :param allocation_state: Whether the Pool is resizing. Possible values
     include: 'steady', 'resizing', 'stopping'
    :type allocation_state: str or ~azure.batch.models.AllocationState
    :param allocation_state_transition_time: When the Pool entered its
     current allocation state.
    :type allocation_state_transition_time: datetime
    :param vm_size: The size of the virtual machines in the Pool (all VMs in
     a Pool are the same size).
    :type vm_size: str
    :param cloud_service_configuration: The cloud service configuration for
     the Pool; mutually exclusive with virtualMachineConfiguration.
    :type cloud_service_configuration:
     ~azure.batch.models.CloudServiceConfiguration
    :param virtual_machine_configuration: The virtual machine configuration
     for the Pool; mutually exclusive with cloudServiceConfiguration.
    :type virtual_machine_configuration:
     ~azure.batch.models.VirtualMachineConfiguration
    :param resize_timeout: The timeout for the most recent resize operation
     (initial sizing counts as a resize). Default 15 minutes.
    :type resize_timeout: timedelta
    :param resize_errors: Errors from the last Pool resize, set only when the
     allocationState is Steady.
    :type resize_errors: list[~azure.batch.models.ResizeError]
    :param current_dedicated_nodes: The number of dedicated Compute Nodes
     currently in the Pool.
    :type current_dedicated_nodes: int
    :param current_low_priority_nodes: The number of low-priority Compute
     Nodes currently in the Pool, including preempted Nodes.
    :type current_low_priority_nodes: int
    :param target_dedicated_nodes: The desired number of dedicated Compute
     Nodes in the Pool.
    :type target_dedicated_nodes: int
    :param target_low_priority_nodes: The desired number of low-priority
     Compute Nodes in the Pool.
    :type target_low_priority_nodes: int
    :param enable_auto_scale: Whether the Pool size should automatically
     adjust over time; when true, autoScaleFormula is required. Default
     false.
    :type enable_auto_scale: bool
    :param auto_scale_formula: The autoscale formula; set only when
     enableAutoScale is true.
    :type auto_scale_formula: str
    :param auto_scale_evaluation_interval: How often the autoscale formula is
     evaluated; set only when enableAutoScale is true.
    :type auto_scale_evaluation_interval: timedelta
    :param auto_scale_run: Results and errors from the last autoscale
     evaluation; set only when enableAutoScale is true.
    :type auto_scale_run: ~azure.batch.models.AutoScaleRun
    :param enable_inter_node_communication: Whether the Pool permits direct
     communication between Compute Nodes.
    :type enable_inter_node_communication: bool
    :param network_configuration: The network configuration for the Pool.
    :type network_configuration: ~azure.batch.models.NetworkConfiguration
    :param start_task: A Task specified to run on each Compute Node as it
     joins the Pool.
    :type start_task: ~azure.batch.models.StartTask
    :param certificate_references: Certificates to be installed on each
     Compute Node in the Pool.
    :type certificate_references:
     list[~azure.batch.models.CertificateReference]
    :param application_package_references: Packages to be installed on each
     Compute Node in the Pool (maximum of 10 per Pool).
    :type application_package_references:
     list[~azure.batch.models.ApplicationPackageReference]
    :param application_licenses: Application licenses the Batch service will
     make available on each Compute Node in the Pool.
    :type application_licenses: list[str]
    :param max_tasks_per_node: The maximum number of Tasks that can run
     concurrently on a single Compute Node. Default 1.
    :type max_tasks_per_node: int
    :param task_scheduling_policy: How Tasks are distributed across Compute
     Nodes in the Pool; default is spread.
    :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
    :param user_accounts: User Accounts to be created on each Compute Node in
     the Pool.
    :type user_accounts: list[~azure.batch.models.UserAccount]
    :param metadata: Name-value pairs associated with the Pool as metadata.
    :type metadata: list[~azure.batch.models.MetadataItem]
    :param stats: Lifetime utilization statistics; populated only when the
     Pool was retrieved with an expand clause including 'stats'.
    :type stats: ~azure.batch.models.PoolStatistics
    :param mount_configuration: File systems to mount on each node in the
     pool (Azure Files, NFS, CIFS/SMB, Blobfuse).
    :type mount_configuration: list[~azure.batch.models.MountConfiguration]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'PoolState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'},
        'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'},
        'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'},
        'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'PoolStatistics'},
        'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'},
    }

    def __init__(self, **kwargs):
        super(CloudPool, self).__init__(**kwargs)
        # Every mapped attribute defaults to None unless supplied by the
        # caller; iterating the attribute map keeps this in sync with it.
        for _attr in self._attribute_map:
            setattr(self, _attr, kwargs.get(_attr, None))
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class CloudPoolPaged(Paged):
    """A paging container for iterating over a list of
    :class:`CloudPool <azure.batch.models.CloudPool>` objects.
    """

    _attribute_map = {
        # The dot in 'odata.nextLink' is escaped so the msrest deserializer
        # treats it as a literal character, not an attribute-path separator.
        'next_link': {'key': 'odata\\.nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[CloudPool]'}
    }

    def __init__(self, *args, **kwargs):

        super(CloudPoolPaged, self).__init__(*args, **kwargs)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudPool(Model):
    """A Pool in the Azure Batch service.

    :param id: Unique, case-insensitive identifier of the Pool within the
     Account (alphanumerics, hyphens, underscores; up to 64 characters).
    :param display_name: Optional display name (up to 1024 characters).
    :param url: The URL of the Pool.
    :param e_tag: Opaque ETag; pass it on update for optimistic concurrency.
    :param last_modified: Last time Pool-level data changed (node-level
     changes are not reflected here).
    :param creation_time: The creation time of the Pool.
    :param state: Current state; 'active' or 'deleting'.
    :param state_transition_time: When the Pool entered its current state.
    :param allocation_state: 'steady', 'resizing' or 'stopping'.
    :param allocation_state_transition_time: When the Pool entered its
     current allocation state.
    :param vm_size: Size of the virtual machines in the Pool (all the same).
    :param cloud_service_configuration: Cloud Services configuration;
     mutually exclusive with virtual_machine_configuration, one of the two
     must be specified. Not usable with 'UserSubscription' pool allocation.
    :param virtual_machine_configuration: Virtual machine configuration;
     mutually exclusive with cloud_service_configuration.
    :param resize_timeout: Timeout of the most recent resize (default 15
     minutes; initial sizing counts as a resize).
    :param resize_errors: Errors from the last resize; set only when errors
     occurred and the Pool allocationState is steady.
    :param current_dedicated_nodes: Dedicated Compute Nodes currently in the
     Pool.
    :param current_low_priority_nodes: Low-priority Compute Nodes currently
     in the Pool, including preempted ones.
    :param target_dedicated_nodes: Desired number of dedicated nodes.
    :param target_low_priority_nodes: Desired number of low-priority nodes.
    :param enable_auto_scale: Whether the Pool size adjusts automatically;
     when true, auto_scale_formula is required. Default false.
    :param auto_scale_formula: Autoscale formula (set only when
     enableAutoScale is true).
    :param auto_scale_evaluation_interval: Interval between automatic Pool
     size adjustments (set only when autoscaling).
    :param auto_scale_run: Results and errors of the last autoscale
     evaluation (set only when autoscaling).
    :param enable_inter_node_communication: Whether nodes may communicate
     directly; restricts which nodes can be assigned to the Pool.
    :param network_configuration: The network configuration for the Pool.
    :param start_task: Task run on each Compute Node as it joins the Pool.
    :param certificate_references: Certificates installed on each node
     (certificate store on Windows; AZ_BATCH_CERTIFICATES_DIR, or the
     user's 'certs' directory for 'remoteUser' visibility, on Linux).
    :param application_package_references: Packages deployed to each node
     (maximum of 10); changes affect only nodes that join, reboot or
     reimage afterwards.
    :param application_licenses: Batch service application licenses to make
     available on each node; must be a subset of the available licenses.
    :param max_tasks_per_node: Maximum concurrent Tasks per node (default 1;
     at most the smaller of 4x the core count or 256).
    :param task_scheduling_policy: How Tasks are distributed across nodes
     (default: spread).
    :param user_accounts: User Accounts created on each node.
    :param metadata: Name-value metadata pairs associated with the Pool.
    :param stats: Lifetime utilization statistics; populated only when the
     Pool was retrieved with an expand clause including 'stats'.
    :param mount_configuration: File systems mounted on each node (Azure
     Files, NFS, CIFS/SMB, Blobfuse).
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'PoolState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'allocation_state': {'key': 'allocationState', 'type': 'AllocationState'},
        'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
        'vm_size': {'key': 'vmSize', 'type': 'str'},
        'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
        'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
        'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
        'resize_errors': {'key': 'resizeErrors', 'type': '[ResizeError]'},
        'current_dedicated_nodes': {'key': 'currentDedicatedNodes', 'type': 'int'},
        'current_low_priority_nodes': {'key': 'currentLowPriorityNodes', 'type': 'int'},
        'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
        'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
        'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
        'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
        'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
        'auto_scale_run': {'key': 'autoScaleRun', 'type': 'AutoScaleRun'},
        'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
        'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
        'start_task': {'key': 'startTask', 'type': 'StartTask'},
        'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
        'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
        'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
        'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
        'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
        'stats': {'key': 'stats', 'type': 'PoolStatistics'},
        'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'},
    }

    def __init__(self, *, id: str=None, display_name: str=None, url: str=None,
                 e_tag: str=None, last_modified=None, creation_time=None,
                 state=None, state_transition_time=None, allocation_state=None,
                 allocation_state_transition_time=None, vm_size: str=None,
                 cloud_service_configuration=None,
                 virtual_machine_configuration=None, resize_timeout=None,
                 resize_errors=None, current_dedicated_nodes: int=None,
                 current_low_priority_nodes: int=None,
                 target_dedicated_nodes: int=None,
                 target_low_priority_nodes: int=None,
                 enable_auto_scale: bool=None, auto_scale_formula: str=None,
                 auto_scale_evaluation_interval=None, auto_scale_run=None,
                 enable_inter_node_communication: bool=None,
                 network_configuration=None, start_task=None,
                 certificate_references=None,
                 application_package_references=None,
                 application_licenses=None, max_tasks_per_node: int=None,
                 task_scheduling_policy=None, user_accounts=None,
                 metadata=None, stats=None, mount_configuration=None,
                 **kwargs) -> None:
        super(CloudPool, self).__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.url = url
        self.e_tag = e_tag
        self.last_modified = last_modified
        self.creation_time = creation_time
        self.state = state
        self.state_transition_time = state_transition_time
        self.allocation_state = allocation_state
        self.allocation_state_transition_time = allocation_state_transition_time
        self.vm_size = vm_size
        self.cloud_service_configuration = cloud_service_configuration
        self.virtual_machine_configuration = virtual_machine_configuration
        self.resize_timeout = resize_timeout
        self.resize_errors = resize_errors
        self.current_dedicated_nodes = current_dedicated_nodes
        self.current_low_priority_nodes = current_low_priority_nodes
        self.target_dedicated_nodes = target_dedicated_nodes
        self.target_low_priority_nodes = target_low_priority_nodes
        self.enable_auto_scale = enable_auto_scale
        self.auto_scale_formula = auto_scale_formula
        self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
        self.auto_scale_run = auto_scale_run
        self.enable_inter_node_communication = enable_inter_node_communication
        self.network_configuration = network_configuration
        self.start_task = start_task
        self.certificate_references = certificate_references
        self.application_package_references = application_package_references
        self.application_licenses = application_licenses
        self.max_tasks_per_node = max_tasks_per_node
        self.task_scheduling_policy = task_scheduling_policy
        self.user_accounts = user_accounts
        self.metadata = metadata
        self.stats = stats
        self.mount_configuration = mount_configuration
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudServiceConfiguration(Model):
    """The configuration for Compute Nodes in a Pool based on the Azure Cloud
    Services platform.

    All required parameters must be populated in order to send to Azure.

    :param os_family: Required. The Azure Guest OS family to install on the
     virtual machines in the Pool: 2 = Windows Server 2008 R2 SP1,
     3 = Windows Server 2012, 4 = Windows Server 2012 R2,
     5 = Windows Server 2016, 6 = Windows Server 2019. See Azure Guest OS
     Releases
     (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
    :type os_family: str
    :param os_version: The Azure Guest OS version to install. The default is
     *, meaning the latest version for the specified OS family.
    :type os_version: str
    """

    _validation = {
        'os_family': {'required': True},
    }

    _attribute_map = {
        'os_family': {'key': 'osFamily', 'type': 'str'},
        'os_version': {'key': 'osVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CloudServiceConfiguration, self).__init__(**kwargs)
        # kwargs.get defaults to None, matching the service's optional fields.
        self.os_family = kwargs.get('os_family')
        self.os_version = kwargs.get('os_version')
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudServiceConfiguration(Model):
    """The configuration for Compute Nodes in a Pool based on the Azure Cloud
    Services platform.

    All required parameters must be populated in order to send to Azure.

    :param os_family: Required. The Azure Guest OS family to install on the
     virtual machines in the Pool: 2 = Windows Server 2008 R2 SP1,
     3 = Windows Server 2012, 4 = Windows Server 2012 R2,
     5 = Windows Server 2016, 6 = Windows Server 2019. See Azure Guest OS
     Releases
     (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases).
    :type os_family: str
    :param os_version: The Azure Guest OS version to install. The default is
     *, meaning the latest version for the specified OS family.
    :type os_version: str
    """

    _validation = {
        'os_family': {'required': True},
    }

    _attribute_map = {
        'os_family': {'key': 'osFamily', 'type': 'str'},
        'os_version': {'key': 'osVersion', 'type': 'str'},
    }

    # Python-3-only variant: os_family is a required keyword-only argument,
    # enforced by the signature rather than by _validation alone.
    def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None:
        super(CloudServiceConfiguration, self).__init__(**kwargs)
        self.os_family = os_family
        self.os_version = os_version
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class CloudTask(Model):
    """An Azure Batch Task.

    Batch retries Tasks when a recovery operation is triggered on a Node
    (for example an unhealthy Node reboot or host failure). Such retries are
    independent of, and not counted against, maxTaskRetryCount — they may
    occur even when maxTaskRetryCount is 0 — so all Tasks should be
    idempotent: tolerant of interruption and restart without corruption or
    duplicate data. Checkpointing is the best practice for long-running
    Tasks.

    :param id: Unique identifier of the Task within the Job (alphanumerics,
     hyphens, underscores; up to 64 characters).
    :param display_name: Optional display name (up to 1024 characters).
    :param url: The URL of the Task.
    :param e_tag: Opaque ETag; pass it on update for optimistic concurrency.
    :param last_modified: The last modified time of the Task.
    :param creation_time: The creation time of the Task.
    :param exit_conditions: How the Batch service responds when the Task
     completes.
    :param state: Current state; 'active', 'preparing', 'running' or
     'completed'.
    :param state_transition_time: When the Task entered its current state.
    :param previous_state: Previous state; unset while the Task is in its
     initial Active state.
    :param previous_state_transition_time: When the Task entered its
     previous state; unset while in the initial Active state.
    :param command_line: Command line of the Task. Not run under a shell, so
     invoke one explicitly (e.g. "cmd /c ..." or "/bin/sh -c ...") to use
     shell features. For multi-instance Tasks it runs as the primary Task
     after the coordination command line finishes.
    :param container_settings: Container settings; required if the Pool has
     containerConfiguration set, forbidden otherwise. Directories under
     AZ_BATCH_NODE_ROOT_DIR and Task environment variables are mapped into
     the container.
    :param resource_files: Files downloaded to the node before the command
     line runs (primary node only for multi-instance Tasks). If the list
     exceeds the maximum size the request fails with RequestEntityTooLarge;
     reduce it with .zip files, Application Packages or Docker containers.
    :param output_files: Files uploaded from the node after the command line
     runs (primary node only for multi-instance Tasks).
    :param environment_settings: Environment variable settings for the Task.
    :param affinity_info: Locality hint for selecting a Compute Node.
    :param constraints: Execution constraints that apply to this Task.
    :param user_identity: User identity the Task runs under; defaults to a
     non-administrative user unique to the Task.
    :param execution_info: Information about the execution of the Task.
    :param node_info: Information about the node on which the Task ran.
    :param multi_instance_settings: Marks the Task as multi-instance and
     describes how to run it.
    :param stats: Resource usage statistics for the Task.
    :param depends_on: Tasks this Task depends on; it is not scheduled until
     all of them have completed successfully.
    :param application_package_references: Packages deployed (to a shared
     directory, not the working directory) before the command line runs;
     up-to-date copies already on the node are reused.
    :param authentication_token_settings: Settings for an authentication
     token (supplied via AZ_BATCH_AUTHENTICATION_TOKEN) that the Task can
     use for Batch service operations without an Account access key.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
        'state': {'key': 'state', 'type': 'TaskState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'TaskState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
        'command_line': {'key': 'commandLine', 'type': 'str'},
        'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
        'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
        'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
        'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
        'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
        'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
        'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
        'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'},
        'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
        'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
        'stats': {'key': 'stats', 'type': 'TaskStatistics'},
        'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
        'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
        'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
    }

    def __init__(self, **kwargs):
        super(CloudTask, self).__init__(**kwargs)
        # Every mapped attribute defaults to None; iterating _attribute_map
        # preserves the declaration order of the explicit per-field form.
        for attr in self._attribute_map:
            setattr(self, attr, kwargs.get(attr, None))
b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_list_subtasks_result.py new file mode 100644 index 00000000..c892bfe0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_list_subtasks_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a Task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, **kwargs): + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_list_subtasks_result_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_list_subtasks_result_py3.py new file mode 100644 index 00000000..3ab59743 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_list_subtasks_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTaskListSubtasksResult(Model): + """The result of listing the subtasks of a Task. + + :param value: The list of subtasks. + :type value: list[~azure.batch.models.SubtaskInformation] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[SubtaskInformation]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(CloudTaskListSubtasksResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_paged.py new file mode 100644 index 00000000..3d8ef774 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class CloudTaskPaged(Paged): + """ + A paging container for iterating over a list of :class:`CloudTask <azure.batch.models.CloudTask>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[CloudTask]'} + } + + def __init__(self, *args, **kwargs): + + super(CloudTaskPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_py3.py new file mode 100644 index 00000000..096f3717 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/cloud_task_py3.py @@ -0,0 +1,211 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CloudTask(Model): + """An Azure Batch Task. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data.
+ The best practice for long running Tasks is to use some form of + checkpointing. + + :param id: A string that uniquely identifies the Task within the Job. The + ID can contain any combination of alphanumeric characters including + hyphens and underscores, and cannot contain more than 64 characters. + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param url: The URL of the Task. + :type url: str + :param e_tag: The ETag of the Task. This is an opaque string. You can use + it to detect whether the Task has changed between requests. In particular, + you can be pass the ETag when updating a Task to specify that your changes + should take effect only if nobody else has modified the Task in the + meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param creation_time: The creation time of the Task. + :type creation_time: datetime + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param state: The current state of the Task. Possible values include: + 'active', 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.TaskState + :param state_transition_time: The time at which the Task entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the Task. This property is + not set if the Task is in its initial Active state. Possible values + include: 'active', 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.TaskState + :param previous_state_transition_time: The time at which the Task entered + its previous state. This property is not set if the Task is in its initial + Active state. 
+ :type previous_state_transition_time: datetime + :param command_line: The command line of the Task. For multi-instance + Tasks, the command line is executed as the primary Task, after the primary + Task and all subtasks have finished executing the coordination command + line. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. 
There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param execution_info: Information about the execution of the Task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + :param node_info: Information about the Compute Node on which the Task + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param stats: Resource usage statistics for the Task. 
+ :type stats: ~azure.batch.models.TaskStatistics + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'state': {'key': 'state', 'type': 'TaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'TaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'stats': {'key': 'stats', 'type': 'TaskStatistics'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def 
__init__(self, *, id: str=None, display_name: str=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(CloudTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.url = url + self.e_tag = e_tag + self.last_modified = last_modified + self.creation_time = creation_time + self.exit_conditions = exit_conditions + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.execution_info = execution_info + self.node_info = node_info + self.multi_instance_settings = multi_instance_settings + self.stats = stats + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node.py new file mode 100644 index 00000000..ed38ca8f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A Compute Node in the Batch service. + + :param id: The ID of the Compute Node. Every Compute Node that is added to + a Pool is assigned a unique ID. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the ID is reclaimed and + could be reused for new Compute Nodes. + :type id: str + :param url: The URL of the Compute Node. + :type url: str + :param state: The current state of the Compute Node. The low-priority + Compute Node has been preempted. Tasks which were running on the Compute + Node when it was preempted will be rescheduled when another Compute Node + becomes available. Possible values include: 'idle', 'rebooting', + 'reimaging', 'running', 'unusable', 'creating', 'starting', + 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', + 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the Compute Node is available for Task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the Compute Node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the Compute Node was + started. This property may not be present if the Compute Node state is + unusable. 
+ :type last_boot_time: datetime + :param allocation_time: The time at which this Compute Node was allocated + to the Pool. This is the time when the Compute Node was initially + allocated and doesn't change once set. It is not updated when the Compute + Node is service healed or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other Nodes can use to communicate + with this Compute Node. Every Compute Node that is added to a Pool is + assigned a unique IP address. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the IP address is reclaimed + and could be reused for new Compute Nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a Task + to request that the Task be scheduled on this Compute Node. Note that this + is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled + elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the Compute Node. + For information about available sizes of virtual machines in Pools, see + Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of Job Tasks completed on the + Compute Node. This includes Job Manager Tasks and normal Tasks, but not + Job Preparation, Job Release or Start Tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running Job + Tasks on the Compute Node. This includes Job Manager Tasks and normal + Tasks, but not Job Preparation, Job Release or Start Tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of Job Tasks which + completed successfully (with exitCode 0) on the Compute Node. 
This + includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job + Release or Start Tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of Tasks whose state has recently changed. + This property is present only if at least one Task has run on this Compute + Node since it was assigned to the Pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The Task specified to run on the Compute Node as it + joins the Pool. + :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + StartTask on the Compute Node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of Certificates installed on the + Compute Node. For Windows Nodes, the Batch service installs the + Certificates to the specified Certificate store and location. For Linux + Compute Nodes, the Certificates are stored in a directory inside the Task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the Task to query for this location. For Certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and Certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the Compute Node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this Compute Node is a dedicated Compute + Node. If false, the Compute Node is a low-priority Compute Node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the Compute + Node. 
+ :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the Compute Node agent version + and the time the Compute Node upgraded to a new version. + :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, **kwargs): + super(ComputeNode, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) + self.state = kwargs.get('state', None) + self.scheduling_state = kwargs.get('scheduling_state', None) + self.state_transition_time 
= kwargs.get('state_transition_time', None) + self.last_boot_time = kwargs.get('last_boot_time', None) + self.allocation_time = kwargs.get('allocation_time', None) + self.ip_address = kwargs.get('ip_address', None) + self.affinity_id = kwargs.get('affinity_id', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_tasks_run = kwargs.get('total_tasks_run', None) + self.running_tasks_count = kwargs.get('running_tasks_count', None) + self.total_tasks_succeeded = kwargs.get('total_tasks_succeeded', None) + self.recent_tasks = kwargs.get('recent_tasks', None) + self.start_task = kwargs.get('start_task', None) + self.start_task_info = kwargs.get('start_task_info', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.errors = kwargs.get('errors', None) + self.is_dedicated = kwargs.get('is_dedicated', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) + self.node_agent_info = kwargs.get('node_agent_info', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options.py new file mode 100644 index 00000000..89020475 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options_py3.py new file mode 100644 index 00000000..dab4040b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_add_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeAddUserOptions(Model): + """Additional parameters for add_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeAddUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options.py new file mode 100644 index 00000000..4874a98a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options_py3.py new file mode 100644 index 00000000..88217b93 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_delete_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDeleteUserOptions(Model): + """Additional parameters for delete_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDeleteUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options.py new file mode 100644 index 00000000..92bf2911 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options_py3.py new file mode 100644 index 00000000..0432c5db --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_disable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeDisableSchedulingOptions(Model): + """Additional parameters for disable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeDisableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options.py new file mode 100644 index 00000000..905e3e34 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options_py3.py new file mode 100644 index 00000000..4ef5d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_enable_scheduling_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEnableSchedulingOptions(Model): + """Additional parameters for enable_scheduling operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeEnableSchedulingOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration.py new file mode 100644 index 00000000..ca48b8f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the Compute Node. 
+ :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = kwargs.get('inbound_endpoints', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration_py3.py new file mode 100644 index 00000000..4a29c553 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_endpoint_configuration_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeEndpointConfiguration(Model): + """The endpoint configuration for the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param inbound_endpoints: Required. The list of inbound endpoints that are + accessible on the Compute Node. 
+ :type inbound_endpoints: list[~azure.batch.models.InboundEndpoint] + """ + + _validation = { + 'inbound_endpoints': {'required': True}, + } + + _attribute_map = { + 'inbound_endpoints': {'key': 'inboundEndpoints', 'type': '[InboundEndpoint]'}, + } + + def __init__(self, *, inbound_endpoints, **kwargs) -> None: + super(ComputeNodeEndpointConfiguration, self).__init__(**kwargs) + self.inbound_endpoints = inbound_endpoints diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error.py new file mode 100644 index 00000000..eb284933 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a Compute Node. + + :param code: An identifier for the Compute Node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Compute Node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + Compute Node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.error_details = kwargs.get('error_details', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error_py3.py new file mode 100644 index 00000000..ef0a84da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeError(Model): + """An error encountered by a Compute Node. + + :param code: An identifier for the Compute Node error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Compute Node error, intended to + be suitable for display in a user interface. + :type message: str + :param error_details: The list of additional error details related to the + Compute Node error. 
+ :type error_details: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'error_details': {'key': 'errorDetails', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, error_details=None, **kwargs) -> None: + super(ComputeNodeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.error_details = error_details diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options.py new file mode 100644 index 00000000..6218d444 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options_py3.py new file mode 100644 index 00000000..de6284b3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options.py new file mode 100644 index 00000000..20af5558 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options_py3.py new file mode 100644 index 00000000..d79ce622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_desktop_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteDesktopOptions(Model): + """Additional parameters for get_remote_desktop operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteDesktopOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options.py new file mode 100644 index 00000000..9c01ed5f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options_py3.py new file mode 100644 index 00000000..2d7987ab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsOptions(Model): + """Additional parameters for get_remote_login_settings operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result.py new file mode 100644 index 00000000..56060d8b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the Compute Node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + Compute Node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = kwargs.get('remote_login_ip_address', None) + self.remote_login_port = kwargs.get('remote_login_port', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result_py3.py new file mode 100644 index 00000000..a2120536 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_get_remote_login_settings_result_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeGetRemoteLoginSettingsResult(Model): + """The remote login settings for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param remote_login_ip_address: Required. The IP address used for remote + login to the Compute Node. + :type remote_login_ip_address: str + :param remote_login_port: Required. The port used for remote login to the + Compute Node. 
+ :type remote_login_port: int + """ + + _validation = { + 'remote_login_ip_address': {'required': True}, + 'remote_login_port': {'required': True}, + } + + _attribute_map = { + 'remote_login_ip_address': {'key': 'remoteLoginIPAddress', 'type': 'str'}, + 'remote_login_port': {'key': 'remoteLoginPort', 'type': 'int'}, + } + + def __init__(self, *, remote_login_ip_address: str, remote_login_port: int, **kwargs) -> None: + super(ComputeNodeGetRemoteLoginSettingsResult, self).__init__(**kwargs) + self.remote_login_ip_address = remote_login_ip_address + self.remote_login_port = remote_login_port diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information.py new file mode 100644 index 00000000..0f9677ac --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the Compute Node on which a Task ran. + + :param affinity_id: An identifier for the Node on which the Task ran, + which can be passed when adding a Task to request that the Task be + scheduled on this Compute Node. + :type affinity_id: str + :param node_url: The URL of the Compute Node on which the Task ran. . + :type node_url: str + :param pool_id: The ID of the Pool on which the Task ran. + :type pool_id: str + :param node_id: The ID of the Compute Node on which the Task ran. 
+ :type node_id: str + :param task_root_directory: The root directory of the Task on the Compute + Node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Task + on the Compute Node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = kwargs.get('affinity_id', None) + self.node_url = kwargs.get('node_url', None) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information_py3.py new file mode 100644 index 00000000..a40ca517 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_information_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeInformation(Model): + """Information about the Compute Node on which a Task ran. + + :param affinity_id: An identifier for the Node on which the Task ran, + which can be passed when adding a Task to request that the Task be + scheduled on this Compute Node. + :type affinity_id: str + :param node_url: The URL of the Compute Node on which the Task ran. . + :type node_url: str + :param pool_id: The ID of the Pool on which the Task ran. + :type pool_id: str + :param node_id: The ID of the Compute Node on which the Task ran. + :type node_id: str + :param task_root_directory: The root directory of the Task on the Compute + Node. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Task + on the Compute Node. + :type task_root_directory_url: str + """ + + _attribute_map = { + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + } + + def __init__(self, *, affinity_id: str=None, node_url: str=None, pool_id: str=None, node_id: str=None, task_root_directory: str=None, task_root_directory_url: str=None, **kwargs) -> None: + super(ComputeNodeInformation, self).__init__(**kwargs) + self.affinity_id = affinity_id + self.node_url = node_url + self.pool_id = pool_id + self.node_id = node_id + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options.py new file mode 100644 index 00000000..a8e5602b --- 
/dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options_py3.py new file mode 100644 index 00000000..323bf2f5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_list_options_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Compute Nodes can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_paged.py 
b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_paged.py new file mode 100644 index 00000000..26f41dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ComputeNodePaged(Paged): + """ + A paging container for iterating over a list of :class:`ComputeNode ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ComputeNode]'} + } + + def __init__(self, *args, **kwargs): + + super(ComputeNodePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_py3.py new file mode 100644 index 00000000..fc13b0a5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_py3.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNode(Model): + """A Compute Node in the Batch service. + + :param id: The ID of the Compute Node. Every Compute Node that is added to + a Pool is assigned a unique ID. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the ID is reclaimed and + could be reused for new Compute Nodes. + :type id: str + :param url: The URL of the Compute Node. + :type url: str + :param state: The current state of the Compute Node. The low-priority + Compute Node has been preempted. Tasks which were running on the Compute + Node when it was preempted will be rescheduled when another Compute Node + becomes available. Possible values include: 'idle', 'rebooting', + 'reimaging', 'running', 'unusable', 'creating', 'starting', + 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', + 'offline', 'preempted' + :type state: str or ~azure.batch.models.ComputeNodeState + :param scheduling_state: Whether the Compute Node is available for Task + scheduling. Possible values include: 'enabled', 'disabled' + :type scheduling_state: str or ~azure.batch.models.SchedulingState + :param state_transition_time: The time at which the Compute Node entered + its current state. + :type state_transition_time: datetime + :param last_boot_time: The last time at which the Compute Node was + started. This property may not be present if the Compute Node state is + unusable. + :type last_boot_time: datetime + :param allocation_time: The time at which this Compute Node was allocated + to the Pool. This is the time when the Compute Node was initially + allocated and doesn't change once set. It is not updated when the Compute + Node is service healed or preempted. + :type allocation_time: datetime + :param ip_address: The IP address that other Nodes can use to communicate + with this Compute Node. 
Every Compute Node that is added to a Pool is + assigned a unique IP address. Whenever a Compute Node is removed from a + Pool, all of its local files are deleted, and the IP address is reclaimed + and could be reused for new Compute Nodes. + :type ip_address: str + :param affinity_id: An identifier which can be passed when adding a Task + to request that the Task be scheduled on this Compute Node. Note that this + is just a soft affinity. If the target Compute Node is busy or unavailable + at the time the Task is scheduled, then the Task will be scheduled + elsewhere. + :type affinity_id: str + :param vm_size: The size of the virtual machine hosting the Compute Node. + For information about available sizes of virtual machines in Pools, see + Choose a VM size for Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_tasks_run: The total number of Job Tasks completed on the + Compute Node. This includes Job Manager Tasks and normal Tasks, but not + Job Preparation, Job Release or Start Tasks. + :type total_tasks_run: int + :param running_tasks_count: The total number of currently running Job + Tasks on the Compute Node. This includes Job Manager Tasks and normal + Tasks, but not Job Preparation, Job Release or Start Tasks. + :type running_tasks_count: int + :param total_tasks_succeeded: The total number of Job Tasks which + completed successfully (with exitCode 0) on the Compute Node. This + includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job + Release or Start Tasks. + :type total_tasks_succeeded: int + :param recent_tasks: A list of Tasks whose state has recently changed. + This property is present only if at least one Task has run on this Compute + Node since it was assigned to the Pool. + :type recent_tasks: list[~azure.batch.models.TaskInformation] + :param start_task: The Task specified to run on the Compute Node as it + joins the Pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param start_task_info: Runtime information about the execution of the + StartTask on the Compute Node. + :type start_task_info: ~azure.batch.models.StartTaskInformation + :param certificate_references: The list of Certificates installed on the + Compute Node. For Windows Nodes, the Batch service installs the + Certificates to the specified Certificate store and location. For Linux + Compute Nodes, the Certificates are stored in a directory inside the Task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the Task to query for this location. For Certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and Certificates are placed + in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param errors: The list of errors that are currently being encountered by + the Compute Node. + :type errors: list[~azure.batch.models.ComputeNodeError] + :param is_dedicated: Whether this Compute Node is a dedicated Compute + Node. If false, the Compute Node is a low-priority Compute Node. + :type is_dedicated: bool + :param endpoint_configuration: The endpoint configuration for the Compute + Node. + :type endpoint_configuration: + ~azure.batch.models.ComputeNodeEndpointConfiguration + :param node_agent_info: Information about the Compute Node agent version + and the time the Compute Node upgraded to a new version. 
+ :type node_agent_info: ~azure.batch.models.NodeAgentInformation + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'ComputeNodeState'}, + 'scheduling_state': {'key': 'schedulingState', 'type': 'SchedulingState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'last_boot_time': {'key': 'lastBootTime', 'type': 'iso-8601'}, + 'allocation_time': {'key': 'allocationTime', 'type': 'iso-8601'}, + 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'affinity_id': {'key': 'affinityId', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_tasks_run': {'key': 'totalTasksRun', 'type': 'int'}, + 'running_tasks_count': {'key': 'runningTasksCount', 'type': 'int'}, + 'total_tasks_succeeded': {'key': 'totalTasksSucceeded', 'type': 'int'}, + 'recent_tasks': {'key': 'recentTasks', 'type': '[TaskInformation]'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'start_task_info': {'key': 'startTaskInfo', 'type': 'StartTaskInformation'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'errors': {'key': 'errors', 'type': '[ComputeNodeError]'}, + 'is_dedicated': {'key': 'isDedicated', 'type': 'bool'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'ComputeNodeEndpointConfiguration'}, + 'node_agent_info': {'key': 'nodeAgentInfo', 'type': 'NodeAgentInformation'}, + } + + def __init__(self, *, id: str=None, url: str=None, state=None, scheduling_state=None, state_transition_time=None, last_boot_time=None, allocation_time=None, ip_address: str=None, affinity_id: str=None, vm_size: str=None, total_tasks_run: int=None, running_tasks_count: int=None, total_tasks_succeeded: int=None, recent_tasks=None, start_task=None, start_task_info=None, certificate_references=None, errors=None, is_dedicated: bool=None, endpoint_configuration=None, node_agent_info=None, **kwargs) -> 
None: + super(ComputeNode, self).__init__(**kwargs) + self.id = id + self.url = url + self.state = state + self.scheduling_state = scheduling_state + self.state_transition_time = state_transition_time + self.last_boot_time = last_boot_time + self.allocation_time = allocation_time + self.ip_address = ip_address + self.affinity_id = affinity_id + self.vm_size = vm_size + self.total_tasks_run = total_tasks_run + self.running_tasks_count = running_tasks_count + self.total_tasks_succeeded = total_tasks_succeeded + self.recent_tasks = recent_tasks + self.start_task = start_task + self.start_task_info = start_task_info + self.certificate_references = certificate_references + self.errors = errors + self.is_dedicated = is_dedicated + self.endpoint_configuration = endpoint_configuration + self.node_agent_info = node_agent_info diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options.py new file mode 100644 index 00000000..182c563e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options_py3.py new file mode 100644 index 00000000..97e8cb41 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reboot_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeRebootOptions(Model): + """Additional parameters for reboot operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeRebootOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options.py new file mode 100644 index 00000000..8ec6e55f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright 
(c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options_py3.py new file mode 100644 index 00000000..dcff3ee8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_reimage_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeReimageOptions(Model): + """Additional parameters for reimage operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeReimageOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options.py new file mode 100644 index 00000000..ed1f9548 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options_py3.py new file mode 100644 index 00000000..81e45b6c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_update_user_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUpdateUserOptions(Model): + """Additional parameters for update_user operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUpdateUserOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options.py new file mode 100644 index 00000000..071b712e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. 
+ 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options_py3.py new file mode 100644 index 00000000..bac1dad5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_upload_batch_service_logs_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUploadBatchServiceLogsOptions(Model): + """Additional parameters for upload_batch_service_logs operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(ComputeNodeUploadBatchServiceLogsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user.py new file mode 100644 index 00000000..af365f75 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user Account for RDP or SSH access on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the Account. + :type name: str + :param is_admin: Whether the Account should be an administrator on the + Compute Node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_admin = kwargs.get('is_admin', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.password = kwargs.get('password', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user_py3.py new file mode 100644 index 00000000..36f1ef5b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/compute_node_user_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComputeNodeUser(Model): + """A user Account for RDP or SSH access on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The user name of the Account. + :type name: str + :param is_admin: Whether the Account should be an administrator on the + Compute Node. The default value is false. + :type is_admin: bool + :param expiry_time: The time at which the Account should expire. 
If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. + :type password: str + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type ssh_public_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_admin': {'key': 'isAdmin', 'type': 'bool'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'password': {'key': 'password', 'type': 'str'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, name: str, is_admin: bool=None, expiry_time=None, password: str=None, ssh_public_key: str=None, **kwargs) -> None: + super(ComputeNodeUser, self).__init__(**kwargs) + self.name = name + self.is_admin = is_admin + self.expiry_time = expiry_time + self.password = password + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2019_08_01/models/container_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/container_configuration.py new file mode 100644 index 00000000..ae90c83b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/container_configuration.py @@ -0,0 +1,53 @@ +# 
coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled Pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container Image names. + This is the full Image reference, as would be specified to "docker pull". + An Image will be sourced from the default Docker registry unless the Image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any Images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. 
+ :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, **kwargs): + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = kwargs.get('container_image_names', None) + self.container_registries = kwargs.get('container_registries', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/container_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/container_configuration_py3.py new file mode 100644 index 00000000..36885632 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/container_configuration_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerConfiguration(Model): + """The configuration for container-enabled Pools. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: + "dockerCompatible" . + :vartype type: str + :param container_image_names: The collection of container Image names. 
+ This is the full Image reference, as would be specified to "docker pull". + An Image will be sourced from the default Docker registry unless the Image + is fully qualified with an alternative registry. + :type container_image_names: list[str] + :param container_registries: Additional private registries from which + containers can be pulled. If any Images must be downloaded from a private + registry which requires credentials, then those credentials must be + provided here. + :type container_registries: list[~azure.batch.models.ContainerRegistry] + """ + + _validation = { + 'type': {'required': True, 'constant': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'container_image_names': {'key': 'containerImageNames', 'type': '[str]'}, + 'container_registries': {'key': 'containerRegistries', 'type': '[ContainerRegistry]'}, + } + + type = "dockerCompatible" + + def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None: + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = container_image_names + self.container_registries = container_registries diff --git a/azext/generated/sdk/batch/v2019_08_01/models/container_registry.py b/azext/generated/sdk/batch/v2019_08_01/models/container_registry.py new file mode 100644 index 00000000..18203196 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/container_registry.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = kwargs.get('registry_server', None) + self.user_name = kwargs.get('user_name', None) + self.password = kwargs.get('password', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/container_registry_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/container_registry_py3.py new file mode 100644 index 00000000..eb47f9e5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/container_registry_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerRegistry(Model): + """A private container registry. + + All required parameters must be populated in order to send to Azure. + + :param registry_server: The registry URL. If omitted, the default is + "docker.io". + :type registry_server: str + :param user_name: Required. The user name to log into the registry server. + :type user_name: str + :param password: Required. The password to log into the registry server. + :type password: str + """ + + _validation = { + 'user_name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'registry_server': {'key': 'registryServer', 'type': 'str'}, + 'user_name': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None: + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = registry_server + self.user_name = user_name + self.password = password diff --git a/azext/generated/sdk/batch/v2019_08_01/models/data_disk.py b/azext/generated/sdk/batch/v2019_08_01/models/data_disk.py new file mode 100644 index 00000000..af214eeb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/data_disk.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to Compute Nodes + in the Pool. When using attached data disks, you need to mount and format + the disks from within a VM to use them. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. + :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage Account type to be used for the + data disk. If omitted, the default is "standard_lrs". 
Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, **kwargs): + super(DataDisk, self).__init__(**kwargs) + self.lun = kwargs.get('lun', None) + self.caching = kwargs.get('caching', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.storage_account_type = kwargs.get('storage_account_type', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/data_disk_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/data_disk_py3.py new file mode 100644 index 00000000..532a14cd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/data_disk_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DataDisk(Model): + """Settings which will be used by the data disks associated to Compute Nodes + in the Pool. When using attached data disks, you need to mount and format + the disks from within a VM to use them. + + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. 
If attaching multiple disks, each should have a + distinct lun. + :type lun: int + :param caching: The type of caching to be enabled for the data disks. The + default value for caching is readwrite. For information about the caching + options see: + https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + Possible values include: 'none', 'readOnly', 'readWrite' + :type caching: str or ~azure.batch.models.CachingType + :param disk_size_gb: Required. The initial disk size in gigabytes. + :type disk_size_gb: int + :param storage_account_type: The storage Account type to be used for the + data disk. If omitted, the default is "standard_lrs". Possible values + include: 'StandardLRS', 'PremiumLRS' + :type storage_account_type: str or ~azure.batch.models.StorageAccountType + """ + + _validation = { + 'lun': {'required': True}, + 'disk_size_gb': {'required': True}, + } + + _attribute_map = { + 'lun': {'key': 'lun', 'type': 'int'}, + 'caching': {'key': 'caching', 'type': 'CachingType'}, + 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, + 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, + } + + def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: + super(DataDisk, self).__init__(**kwargs) + self.lun = lun + self.caching = caching + self.disk_size_gb = disk_size_gb + self.storage_account_type = storage_account_type diff --git a/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error.py b/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error.py new file mode 100644 index 00000000..01a2ab5e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a Certificate. + + :param code: An identifier for the Certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + Certificate deletion error. This list includes details such as the active + Pools and Compute Nodes referencing this Certificate. However, if a large + number of resources reference the Certificate, the list contains only + about the first hundred. + :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error_py3.py new file mode 100644 index 00000000..e7be00fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/delete_certificate_error_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeleteCertificateError(Model): + """An error encountered by the Batch service when deleting a Certificate. + + :param code: An identifier for the Certificate deletion error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Certificate deletion error, + intended to be suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the + Certificate deletion error. This list includes details such as the active + Pools and Compute Nodes referencing this Certificate. However, if a large + number of resources reference the Certificate, the list contains only + about the first hundred. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_08_01/models/environment_setting.py b/azext/generated/sdk/batch/v2019_08_01/models/environment_setting.py new file mode 100644 index 00000000..f2039d98 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/environment_setting.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a Task process. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/environment_setting_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/environment_setting_py3.py new file mode 100644 index 00000000..7a938844 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/environment_setting_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnvironmentSetting(Model): + """An environment variable to be set on a Task process. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. + :type name: str + :param value: The value of the environment variable. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str=None, **kwargs) -> None: + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/error_message.py b/azext/generated/sdk/batch/v2019_08_01/models/error_message.py new file mode 100644 index 00000000..bbdf64f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/error_message.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ErrorMessage, self).__init__(**kwargs) + self.lang = kwargs.get('lang', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/error_message_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/error_message_py3.py new file mode 100644 index 00000000..a84934fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/error_message_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ErrorMessage(Model): + """An error message received in an Azure Batch error response. + + :param lang: The language code of the error message. + :type lang: str + :param value: The text of the message. 
+ :type value: str + """ + + _attribute_map = { + 'lang': {'key': 'lang', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, lang: str=None, value: str=None, **kwargs) -> None: + super(ErrorMessage, self).__init__(**kwargs) + self.lang = lang + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping.py new file mode 100644 index 00000000..7b18108a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a Task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping_py3.py new file mode 100644 index 00000000..01a0659a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_mapping_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeMapping(Model): + """How the Batch service should respond if a Task exits with a particular exit + code. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. A process exit code. + :type code: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with this exit code. 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'code': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, code: int, exit_options, **kwargs) -> None: + super(ExitCodeMapping, self).__init__(**kwargs) + self.code = code + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping.py new file mode 100644 index 00000000..6b988bad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) + self.exit_options = kwargs.get('exit_options', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping_py3.py new file mode 100644 index 00000000..51c7b3be --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_code_range_mapping_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitCodeRangeMapping(Model): + """A range of exit codes and how the Batch service should respond to exit + codes within that range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first exit code in the range. + :type start: int + :param end: Required. The last exit code in the range. + :type end: int + :param exit_options: Required. How the Batch service should respond if the + Task exits with an exit code in the range start to end (inclusive). 
+ :type exit_options: ~azure.batch.models.ExitOptions + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + 'exit_options': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + 'exit_options': {'key': 'exitOptions', 'type': 'ExitOptions'}, + } + + def __init__(self, *, start: int, end: int, exit_options, **kwargs) -> None: + super(ExitCodeRangeMapping, self).__init__(**kwargs) + self.start = start + self.end = end + self.exit_options = exit_options diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions.py new file mode 100644 index 00000000..b23924ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the Task completes. + + :param exit_codes: A list of individual Task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of Task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + Task fails to start due to an error. 
+ :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the Task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the Task fails + with an exit condition not covered by any of the other properties. This + value is used if the Task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, **kwargs): + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = kwargs.get('exit_codes', None) + self.exit_code_ranges = kwargs.get('exit_code_ranges', None) + self.pre_processing_error = kwargs.get('pre_processing_error', None) + self.file_upload_error = kwargs.get('file_upload_error', None) + self.default = kwargs.get('default', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions_py3.py new file mode 100644 index 00000000..89fb190d --- 
/dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_conditions_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitConditions(Model): + """Specifies how the Batch service should respond when the Task completes. + + :param exit_codes: A list of individual Task exit codes and how the Batch + service should respond to them. + :type exit_codes: list[~azure.batch.models.ExitCodeMapping] + :param exit_code_ranges: A list of Task exit code ranges and how the Batch + service should respond to them. + :type exit_code_ranges: list[~azure.batch.models.ExitCodeRangeMapping] + :param pre_processing_error: How the Batch service should respond if the + Task fails to start due to an error. + :type pre_processing_error: ~azure.batch.models.ExitOptions + :param file_upload_error: How the Batch service should respond if a file + upload error occurs. If the Task exited with an exit code that was + specified via exitCodes or exitCodeRanges, and then encountered a file + upload error, then the action specified by the exit code takes precedence. + :type file_upload_error: ~azure.batch.models.ExitOptions + :param default: How the Batch service should respond if the Task fails + with an exit condition not covered by any of the other properties. 
This + value is used if the Task exits with any nonzero exit code not listed in + the exitCodes or exitCodeRanges collection, with a pre-processing error if + the preProcessingError property is not present, or with a file upload + error if the fileUploadError property is not present. If you want + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. + :type default: ~azure.batch.models.ExitOptions + """ + + _attribute_map = { + 'exit_codes': {'key': 'exitCodes', 'type': '[ExitCodeMapping]'}, + 'exit_code_ranges': {'key': 'exitCodeRanges', 'type': '[ExitCodeRangeMapping]'}, + 'pre_processing_error': {'key': 'preProcessingError', 'type': 'ExitOptions'}, + 'file_upload_error': {'key': 'fileUploadError', 'type': 'ExitOptions'}, + 'default': {'key': 'default', 'type': 'ExitOptions'}, + } + + def __init__(self, *, exit_codes=None, exit_code_ranges=None, pre_processing_error=None, file_upload_error=None, default=None, **kwargs) -> None: + super(ExitConditions, self).__init__(**kwargs) + self.exit_codes = exit_codes + self.exit_code_ranges = exit_code_ranges + self.pre_processing_error = pre_processing_error + self.file_upload_error = file_upload_error + self.default = default diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_options.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_options.py new file mode 100644 index 00000000..637a878e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_options.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the Job containing the Task, if + the Task completes with the given exit condition and the Job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + Job's onTaskFailed property is noaction, then specifying this property + returns an error and the add Task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + Tasks that depend on this Task. Possible values are 'satisfy' (allowing + dependent tasks to progress) and 'block' (dependent tasks continue to + wait). Batch does not yet support cancellation of dependent tasks. 
+ Possible values include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, **kwargs): + super(ExitOptions, self).__init__(**kwargs) + self.job_action = kwargs.get('job_action', None) + self.dependency_action = kwargs.get('dependency_action', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/exit_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/exit_options_py3.py new file mode 100644 index 00000000..aae530e3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/exit_options_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ExitOptions(Model): + """Specifies how the Batch service responds to a particular exit condition. + + :param job_action: An action to take on the Job containing the Task, if + the Task completes with the given exit condition and the Job's + onTaskFailed property is 'performExitOptionsJobAction'. The default is + none for exit code 0 and terminate for all other exit conditions. If the + Job's onTaskFailed property is noaction, then specifying this property + returns an error and the add Task request fails with an invalid property + value error; if you are calling the REST API directly, the HTTP status + code is 400 (Bad Request). 
Possible values include: 'none', 'disable', + 'terminate' + :type job_action: str or ~azure.batch.models.JobAction + :param dependency_action: An action that the Batch service performs on + Tasks that depend on this Task. Possible values are 'satisfy' (allowing + dependent tasks to progress) and 'block' (dependent tasks continue to + wait). Batch does not yet support cancellation of dependent tasks. + Possible values include: 'satisfy', 'block' + :type dependency_action: str or ~azure.batch.models.DependencyAction + """ + + _attribute_map = { + 'job_action': {'key': 'jobAction', 'type': 'JobAction'}, + 'dependency_action': {'key': 'dependencyAction', 'type': 'DependencyAction'}, + } + + def __init__(self, *, job_action=None, dependency_action=None, **kwargs) -> None: + super(ExitOptions, self).__init__(**kwargs) + self.job_action = job_action + self.dependency_action = dependency_action diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options.py new file mode 100644 index 00000000..7522e806 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options_py3.py new file mode 100644 index 00000000..62291d14 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_compute_node_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromComputeNodeOptions(Model): + """Additional parameters for delete_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options.py new file mode 100644 index 00000000..054babe8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options_py3.py new file mode 100644 index 00000000..7d783006 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_delete_from_task_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileDeleteFromTaskOptions(Model): + """Additional parameters for delete_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileDeleteFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options.py new file mode 100644 index 00000000..9a6e3fb7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options_py3.py new file mode 100644 index 00000000..ab3dc34f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_compute_node_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromComputeNodeOptions(Model): + """Additional parameters for get_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options.py new file mode 100644 index 00000000..19bd5cde --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.ocp_range = kwargs.get('ocp_range', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options_py3.py new file mode 100644 index 00000000..30ec6583 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_from_task_options_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetFromTaskOptions(Model): + """Additional parameters for get_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param ocp_range: The byte range to be retrieved. The default is to + retrieve the entire file. The format is bytes=startRange-endRange. + :type ocp_range: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'ocp_range': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, ocp_range: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.ocp_range = ocp_range + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options.py new file mode 100644 index 00000000..bf283d1d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options_py3.py new file mode 100644 index 00000000..69a90184 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_compute_node_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromComputeNodeOptions(Model): + """Additional parameters for get_properties_from_compute_node operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromComputeNodeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options.py new file mode 100644 index 00000000..836387d3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options_py3.py new file mode 100644 index 00000000..73996895 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_get_properties_from_task_options_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileGetPropertiesFromTaskOptions(Model): + """Additional parameters for get_properties_from_task operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(FileGetPropertiesFromTaskOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options.py new file mode 100644 index 00000000..dc32df46 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options_py3.py new file mode 100644 index 00000000..e475dcde --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_compute_node_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromComputeNodeOptions(Model): + """Additional parameters for list_from_compute_node operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromComputeNodeOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options.py b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options.py new file mode 100644 index 00000000..86728b25 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options_py3.py new file mode 100644 index 00000000..354c4869 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_list_from_task_options_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileListFromTaskOptions(Model): + """Additional parameters for list_from_task operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. 
+ :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 files can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(FileListFromTaskOptions, self).__init__(**kwargs) + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_properties.py b/azext/generated/sdk/batch/v2019_08_01/models/file_properties.py new file mode 100644 index 00000000..047a5e72 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_properties.py @@ -0,0 
+1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux Compute Nodes. + :type creation_time: datetime + :param last_modified: Required. The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux Compute Nodes. 
+ :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileProperties, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/file_properties_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/file_properties_py3.py new file mode 100644 index 00000000..ccfe33fd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/file_properties_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileProperties(Model): + """The properties of a file on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: The file creation time. The creation time is not + returned for files on Linux Compute Nodes. + :type creation_time: datetime + :param last_modified: Required. 
The time at which the file was last + modified. + :type last_modified: datetime + :param content_length: Required. The length of the file. + :type content_length: long + :param content_type: The content type of the file. + :type content_type: str + :param file_mode: The file mode attribute in octal format. The file mode + is returned only for files on Linux Compute Nodes. + :type file_mode: str + """ + + _validation = { + 'last_modified': {'required': True}, + 'content_length': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, last_modified, content_length: int, creation_time=None, content_type: str=None, file_mode: str=None, **kwargs) -> None: + super(FileProperties, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.content_length = content_length + self.content_type = content_type + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2019_08_01/models/image_information.py b/azext/generated/sdk/batch/v2019_08_01/models/image_information.py new file mode 100644 index 00000000..cc3d7fd7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/image_information.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageInformation(Model): + """A reference to the Azure Virtual Machines Marketplace Image and additional + information about the Image. + + All required parameters must be populated in order to send to Azure. + + :param node_agent_sku_id: Required. The ID of the Compute Node agent SKU + which the Image supports. + :type node_agent_sku_id: str + :param image_reference: Required. The reference to the Azure Virtual + Machine's Marketplace Image. + :type image_reference: ~azure.batch.models.ImageReference + :param os_type: Required. The type of operating system (e.g. Windows or + Linux) of the Image. Possible values include: 'linux', 'windows' + :type os_type: str or ~azure.batch.models.OSType + :param capabilities: The capabilities or features which the Image + supports. Not every capability of the Image is listed. Capabilities in + this list are considered of special interest and are generally related to + integration with other features in the Azure Batch service. + :type capabilities: list[str] + :param batch_support_end_of_life: The time when the Azure Batch service + will stop accepting create Pool requests for the Image. + :type batch_support_end_of_life: datetime + :param verification_type: Required. Whether the Azure Batch service + actively verifies that the Image is compatible with the associated Compute + Node agent SKU. 
Possible values include: 'verified', 'unverified' + :type verification_type: str or ~azure.batch.models.VerificationType + """ + + _validation = { + 'node_agent_sku_id': {'required': True}, + 'image_reference': {'required': True}, + 'os_type': {'required': True}, + 'verification_type': {'required': True}, + } + + _attribute_map = { + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + 'capabilities': {'key': 'capabilities', 'type': '[str]'}, + 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, + 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, + } + + def __init__(self, **kwargs): + super(ImageInformation, self).__init__(**kwargs) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.image_reference = kwargs.get('image_reference', None) + self.os_type = kwargs.get('os_type', None) + self.capabilities = kwargs.get('capabilities', None) + self.batch_support_end_of_life = kwargs.get('batch_support_end_of_life', None) + self.verification_type = kwargs.get('verification_type', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/image_information_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/image_information_paged.py new file mode 100644 index 00000000..27a3647b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/image_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class ImageInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`ImageInformation ` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[ImageInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(ImageInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/image_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/image_information_py3.py new file mode 100644 index 00000000..7515a9d2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/image_information_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageInformation(Model): + """A reference to the Azure Virtual Machines Marketplace Image and additional + information about the Image. + + All required parameters must be populated in order to send to Azure. + + :param node_agent_sku_id: Required. The ID of the Compute Node agent SKU + which the Image supports. + :type node_agent_sku_id: str + :param image_reference: Required. The reference to the Azure Virtual + Machine's Marketplace Image. + :type image_reference: ~azure.batch.models.ImageReference + :param os_type: Required. The type of operating system (e.g. Windows or + Linux) of the Image. 
Possible values include: 'linux', 'windows' + :type os_type: str or ~azure.batch.models.OSType + :param capabilities: The capabilities or features which the Image + supports. Not every capability of the Image is listed. Capabilities in + this list are considered of special interest and are generally related to + integration with other features in the Azure Batch service. + :type capabilities: list[str] + :param batch_support_end_of_life: The time when the Azure Batch service + will stop accepting create Pool requests for the Image. + :type batch_support_end_of_life: datetime + :param verification_type: Required. Whether the Azure Batch service + actively verifies that the Image is compatible with the associated Compute + Node agent SKU. Possible values include: 'verified', 'unverified' + :type verification_type: str or ~azure.batch.models.VerificationType + """ + + _validation = { + 'node_agent_sku_id': {'required': True}, + 'image_reference': {'required': True}, + 'os_type': {'required': True}, + 'verification_type': {'required': True}, + } + + _attribute_map = { + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'os_type': {'key': 'osType', 'type': 'OSType'}, + 'capabilities': {'key': 'capabilities', 'type': '[str]'}, + 'batch_support_end_of_life': {'key': 'batchSupportEndOfLife', 'type': 'iso-8601'}, + 'verification_type': {'key': 'verificationType', 'type': 'VerificationType'}, + } + + def __init__(self, *, node_agent_sku_id: str, image_reference, os_type, verification_type, capabilities=None, batch_support_end_of_life=None, **kwargs) -> None: + super(ImageInformation, self).__init__(**kwargs) + self.node_agent_sku_id = node_agent_sku_id + self.image_reference = image_reference + self.os_type = os_type + self.capabilities = capabilities + self.batch_support_end_of_life = batch_support_end_of_life + self.verification_type = verification_type diff --git 
a/azext/generated/sdk/batch/v2019_08_01/models/image_reference.py b/azext/generated/sdk/batch/v2019_08_01/models/image_reference.py new file mode 100644 index 00000000..1a7aa8a6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/image_reference.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace Image or a custom + Azure Virtual Machine Image. To get the list of all Azure Marketplace Image + references verified by Azure Batch, see the 'List supported Images' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + Image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + Image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace Image. For + example, 18.04-LTS or 2019-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + Image. A value of 'latest' can be specified to select the latest version + of an Image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + Virtual Machine Image or Shared Image Gallery Image. Computes Compute + Nodes of the Pool will be created using this Image Id. 
This is of either + the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + for Virtual Machine Image or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{versionId} + for SIG image. This property is mutually exclusive with other + ImageReference properties. For Virtual Machine Image it must be in the + same region and subscription as the Azure Batch account. For SIG image it + must have replicas in the same region as the Azure Batch account. For + information about the firewall settings for the Batch Compute Node agent + to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ImageReference, self).__init__(**kwargs) + self.publisher = kwargs.get('publisher', None) + self.offer = kwargs.get('offer', None) + self.sku = kwargs.get('sku', None) + self.version = kwargs.get('version', None) + self.virtual_machine_image_id = kwargs.get('virtual_machine_image_id', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/image_reference_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/image_reference_py3.py new file mode 100644 index 00000000..ddb0f952 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/image_reference_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageReference(Model): + """A reference to an Azure Virtual Machines Marketplace Image or a custom + Azure Virtual Machine Image. To get the list of all Azure Marketplace Image + references verified by Azure Batch, see the 'List supported Images' + operation. + + :param publisher: The publisher of the Azure Virtual Machines Marketplace + Image. For example, Canonical or MicrosoftWindowsServer. + :type publisher: str + :param offer: The offer type of the Azure Virtual Machines Marketplace + Image. For example, UbuntuServer or WindowsServer. + :type offer: str + :param sku: The SKU of the Azure Virtual Machines Marketplace Image. For + example, 18.04-LTS or 2019-Datacenter. + :type sku: str + :param version: The version of the Azure Virtual Machines Marketplace + Image. A value of 'latest' can be specified to select the latest version + of an Image. If omitted, the default is 'latest'. + :type version: str + :param virtual_machine_image_id: The ARM resource identifier of the + Virtual Machine Image or Shared Image Gallery Image. Computes Compute + Nodes of the Pool will be created using this Image Id. This is of either + the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + for Virtual Machine Image or + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{versionId} + for SIG image. This property is mutually exclusive with other + ImageReference properties. 
For Virtual Machine Image it must be in the + same region and subscription as the Azure Batch account. For SIG image it + must have replicas in the same region as the Azure Batch account. For + information about the firewall settings for the Batch Compute Node agent + to communicate with the Batch service see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type virtual_machine_image_id: str + """ + + _attribute_map = { + 'publisher': {'key': 'publisher', 'type': 'str'}, + 'offer': {'key': 'offer', 'type': 'str'}, + 'sku': {'key': 'sku', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'}, + } + + def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None: + super(ImageReference, self).__init__(**kwargs) + self.publisher = publisher + self.offer = offer + self.sku = sku + self.version = version + self.virtual_machine_image_id = virtual_machine_image_id diff --git a/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint.py b/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint.py new file mode 100644 index 00000000..06fecdc7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the Compute + Node. + :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the Compute Node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. + :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(InboundEndpoint, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.public_ip_address = kwargs.get('public_ip_address', None) + self.public_fqdn = kwargs.get('public_fqdn', None) + self.frontend_port = kwargs.get('frontend_port', None) + self.backend_port = kwargs.get('backend_port', None) diff --git 
a/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint_py3.py new file mode 100644 index 00000000..d8bdbd94 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/inbound_endpoint_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundEndpoint(Model): + """An inbound endpoint on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param public_ip_address: Required. The public IP address of the Compute + Node. + :type public_ip_address: str + :param public_fqdn: Required. The public fully qualified domain name for + the Compute Node. + :type public_fqdn: str + :param frontend_port: Required. The public port number of the endpoint. + :type frontend_port: int + :param backend_port: Required. The backend port number of the endpoint. 
+ :type backend_port: int + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'public_ip_address': {'required': True}, + 'public_fqdn': {'required': True}, + 'frontend_port': {'required': True}, + 'backend_port': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'public_ip_address': {'key': 'publicIPAddress', 'type': 'str'}, + 'public_fqdn': {'key': 'publicFQDN', 'type': 'str'}, + 'frontend_port': {'key': 'frontendPort', 'type': 'int'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + } + + def __init__(self, *, name: str, protocol, public_ip_address: str, public_fqdn: str, frontend_port: int, backend_port: int, **kwargs) -> None: + super(InboundEndpoint, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.public_ip_address = public_ip_address + self.public_fqdn = public_fqdn + self.frontend_port = frontend_port + self.backend_port = backend_port diff --git a/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool.py b/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool.py new file mode 100644 index 00000000..654db312 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT Pool that can be used to address specific ports on Compute + Nodes in a Batch Pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch Pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the Compute Node. This + must be unique within a Batch Pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a Pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. 
Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a Pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch Pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. + :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, **kwargs): + super(InboundNATPool, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.backend_port = kwargs.get('backend_port', None) + self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) + self.frontend_port_range_end = kwargs.get('frontend_port_range_end', 
None) + self.network_security_group_rules = kwargs.get('network_security_group_rules', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool_py3.py new file mode 100644 index 00000000..63f4a0b2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/inbound_nat_pool_py3.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InboundNATPool(Model): + """A inbound NAT Pool that can be used to address specific ports on Compute + Nodes in a Batch Pool externally. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch Pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. + :type name: str + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'tcp', 'udp' + :type protocol: str or ~azure.batch.models.InboundEndpointProtocol + :param backend_port: Required. The port number on the Compute Node. This + must be unique within a Batch Pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. 
If any + reserved values are provided the request fails with HTTP status code 400. + :type backend_port: int + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved. All ranges + within a Pool must be distinct and cannot overlap. Each range must contain + at least 40 ports. If any reserved or overlapping values are provided the + request fails with HTTP status code 400. + :type frontend_port_range_start: int + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the + backendPort on individual Compute Nodes. Acceptable values range between 1 + and 65534 except ports from 50000 to 55000 which are reserved by the Batch + service. All ranges within a Pool must be distinct and cannot overlap. + Each range must contain at least 40 ports. If any reserved or overlapping + values are provided the request fails with HTTP status code 400. + :type frontend_port_range_end: int + :param network_security_group_rules: A list of network security group + rules that will be applied to the endpoint. The maximum number of rules + that can be specified across all the endpoints on a Batch Pool is 25. If + no network security group rules are specified, a default rule will be + created to allow inbound access to the specified backendPort. If the + maximum number of network security group rules is exceeded the request + fails with HTTP status code 400. 
+ :type network_security_group_rules: + list[~azure.batch.models.NetworkSecurityGroupRule] + """ + + _validation = { + 'name': {'required': True}, + 'protocol': {'required': True}, + 'backend_port': {'required': True}, + 'frontend_port_range_start': {'required': True}, + 'frontend_port_range_end': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'protocol': {'key': 'protocol', 'type': 'InboundEndpointProtocol'}, + 'backend_port': {'key': 'backendPort', 'type': 'int'}, + 'frontend_port_range_start': {'key': 'frontendPortRangeStart', 'type': 'int'}, + 'frontend_port_range_end': {'key': 'frontendPortRangeEnd', 'type': 'int'}, + 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, + } + + def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None: + super(InboundNATPool, self).__init__(**kwargs) + self.name = name + self.protocol = protocol + self.backend_port = backend_port + self.frontend_port_range_start = frontend_port_range_start + self.frontend_port_range_end = frontend_port_range_end + self.network_security_group_rules = network_security_group_rules diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_add_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_add_options.py new file mode 100644 index 00000000..bdcf7969 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_add_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_add_options_py3.py new file mode 100644 index 00000000..9633e748 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter.py new file mode 100644 index 00000000..23d3824e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch Job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. 
The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. If the Job does not specify a Job Manager Task, the + user must explicitly add Tasks to the Job. If the Job does specify a Job + Manager Task, the Batch service creates the Job Manager Task when the Job + is created, and will try to schedule the Job Manager Task before + scheduling other Tasks in the Job. The Job Manager Task's typical purpose + is to control and/or monitor Job execution, for example by deciding what + additional Tasks to run, determining when the work is complete, etc. + (However, a Job Manager Task is not restricted to these activities - it is + a fully-fledged Task in the system and perform whatever actions are + required for the Job.) For example, a Job Manager Task might download a + file specified as a parameter, analyze the contents of that file and + submit additional Tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. If a Job has a Job + Preparation Task, the Batch service will run the Job Preparation Task on a + Node before starting any Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. A Job Release Task cannot + be specified without also specifying a Job Preparation Task for the Job. 
+ The Batch service runs the Job Release Task on the Nodes that have run the + Job Preparation Task. The primary purpose of the Job Release Task is to + undo changes to Compute Nodes made by the Job Preparation Task. Example + activities include deleting local files, or shutting down services that + were started as part of Job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. Note that if a Job + contains no Tasks, then all Tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set + onAllTasksComplete to terminatejob once you have finished adding Tasks. + The default is noaction. Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. 
A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 
'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, **kwargs): + super(JobAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.metadata = kwargs.get('metadata', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.network_configuration = kwargs.get('network_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter_py3.py new file mode 100644 index 00000000..bd569ff1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_add_parameter_py3.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobAddParameter(Model): + """An Azure Batch Job to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the Job within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Job. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. The default value is 0. + :type priority: int + :param constraints: The execution constraints for the Job. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: Details of a Job Manager Task to be launched when + the Job is started. If the Job does not specify a Job Manager Task, the + user must explicitly add Tasks to the Job. If the Job does specify a Job + Manager Task, the Batch service creates the Job Manager Task when the Job + is created, and will try to schedule the Job Manager Task before + scheduling other Tasks in the Job. The Job Manager Task's typical purpose + is to control and/or monitor Job execution, for example by deciding what + additional Tasks to run, determining when the work is complete, etc. + (However, a Job Manager Task is not restricted to these activities - it is + a fully-fledged Task in the system and perform whatever actions are + required for the Job.) For example, a Job Manager Task might download a + file specified as a parameter, analyze the contents of that file and + submit additional Tasks based on those contents. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task. 
If a Job has a Job + Preparation Task, the Batch service will run the Job Preparation Task on a + Node before starting any Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task. A Job Release Task cannot + be specified without also specifying a Job Preparation Task for the Job. + The Batch service runs the Job Release Task on the Nodes that have run the + Job Preparation Task. The primary purpose of the Job Release Task is to + undo changes to Compute Nodes made by the Job Preparation Task. Example + activities include deleting local files, or shutting down services that + were started as part of Job preparation. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: The list of common environment + variable settings. These environment variables are set for all Tasks in + the Job (including the Job Manager, Job Preparation and Job Release + Tasks). Individual Tasks can override an environment setting specified + here by specifying the same setting name with a different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. + :type pool_info: ~azure.batch.models.PoolInformation + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. Note that if a Job + contains no Tasks, then all Tasks are considered complete. This option is + therefore most commonly used with a Job Manager task; if you want to use + automatic Job termination without a Job Manager, you should initially set + onAllTasksComplete to noaction and update the Job properties to set + onAllTasksComplete to terminatejob once you have finished adding Tasks. + The default is noaction. 
Possible values include: 'noAction', + 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task in the Job fails. A Task is considered to have failed if has a + failureInfo. A failureInfo is set if the Task completes with a non-zero + exit code after exhausting its retry count, or if there was an error + starting the Task, for example due to a resource file download error. The + default is noaction. Possible values include: 'noAction', + 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param metadata: A list of name-value pairs associated with the Job as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the Job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + """ + + _validation = { + 'id': {'required': True}, + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + } + + def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None: + super(JobAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.priority = priority + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.on_all_tasks_complete = 
on_all_tasks_complete + self.on_task_failure = on_task_failure + self.metadata = metadata + self.uses_task_dependencies = uses_task_dependencies + self.network_configuration = network_configuration diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_constraints.py b/azext/generated/sdk/batch/v2019_08_01/models/job_constraints.py new file mode 100644 index 00000000..e53257c1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_constraints.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a Job. + + :param max_wall_clock_time: The maximum elapsed time that the Job may run, + measured from the time the Job is created. If the Job does not complete + within the time limit, the Batch service terminates it and any Tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the Job may run. + :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a Task up + to 4 times (one initial try and 3 retries). 
If the maximum retry count is + 0, the Batch service does not retry Tasks. If the maximum retry count is + -1, the Batch service retries Tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_constraints_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_constraints_py3.py new file mode 100644 index 00000000..4dc5e9c8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_constraints_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobConstraints(Model): + """The execution constraints for a Job. + + :param max_wall_clock_time: The maximum elapsed time that the Job may run, + measured from the time the Job is created. If the Job does not complete + within the time limit, the Batch service terminates it and any Tasks that + are still running. In this case, the termination reason will be + MaxWallClockTimeExpiry. If this property is not specified, there is no + time limit on how long the Job may run. 
+ :type max_wall_clock_time: timedelta + :param max_task_retry_count: The maximum number of times each Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try each Task once, and may then retry up to this + limit. For example, if the maximum retry count is 3, Batch tries a Task up + to 4 times (one initial try and 3 retries). If the maximum retry count is + 0, the Batch service does not retry Tasks. If the maximum retry count is + -1, the Batch service retries Tasks without limit. The default value is 0 + (no retries). + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(JobConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options.py new file mode 100644 index 00000000..a537b55e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options_py3.py new file mode 100644 index 00000000..821db0e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options.py new file mode 100644 index 00000000..c6694516 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options_py3.py new file mode 100644 index 00000000..4b077714 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableOptions(Model): + """Additional parameters for disable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter.py new file mode 100644 index 00000000..75f02201 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a Job. 
+ + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active Tasks associated + with the Job. Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, **kwargs): + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = kwargs.get('disable_tasks', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter_py3.py new file mode 100644 index 00000000..9fb96f22 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_disable_parameter_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobDisableParameter(Model): + """Options when disabling a Job. + + All required parameters must be populated in order to send to Azure. + + :param disable_tasks: Required. What to do with active Tasks associated + with the Job. 
Possible values include: 'requeue', 'terminate', 'wait' + :type disable_tasks: str or ~azure.batch.models.DisableJobOption + """ + + _validation = { + 'disable_tasks': {'required': True}, + } + + _attribute_map = { + 'disable_tasks': {'key': 'disableTasks', 'type': 'DisableJobOption'}, + } + + def __init__(self, *, disable_tasks, **kwargs) -> None: + super(JobDisableParameter, self).__init__(**kwargs) + self.disable_tasks = disable_tasks diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options.py new file mode 100644 index 00000000..182f2b04 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options_py3.py new file mode 100644 index 00000000..47695f37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobEnableOptions(Model): + """Additional parameters for enable operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information.py new file mode 100644 index 00000000..bca20293 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a Job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the Job. This is the time + at which the Job was created. + :type start_time: datetime + :param end_time: The completion time of the Job. This property is set only + if the Job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the Pool to which this Job is assigned. This + element contains the actual Pool where the Job is assigned. When you get + Job details from the service, they also contain a poolInfo element, which + contains the Pool configuration data from when the Job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the Job ran on an + auto Pool, and this property contains the ID of that auto Pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the Job. This property is not set if there was no error + starting the Job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the Job ended. + This property is set only if the Job is in the completed state. If the + Batch service terminates the Job, it sets the reason as follows: + JMComplete - the Job Manager Task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime + constraint. TerminateJobSchedule - the Job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete + attribute is set to terminatejob, and all Tasks in the Job are complete. 
+ TaskFailed - the Job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a Task in the Job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a Job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.pool_id = kwargs.get('pool_id', None) + self.scheduling_error = kwargs.get('scheduling_error', None) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information_py3.py new file mode 100644 index 00000000..0097b52f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_execution_information_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobExecutionInformation(Model): + """Contains information about the execution of a Job in the Azure Batch + service. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the Job. This is the time + at which the Job was created. + :type start_time: datetime + :param end_time: The completion time of the Job. This property is set only + if the Job is in the completed state. + :type end_time: datetime + :param pool_id: The ID of the Pool to which this Job is assigned. This + element contains the actual Pool where the Job is assigned. When you get + Job details from the service, they also contain a poolInfo element, which + contains the Pool configuration data from when the Job was added or + updated. That poolInfo element may also contain a poolId element. If it + does, the two IDs are the same. If it does not, it means the Job ran on an + auto Pool, and this property contains the ID of that auto Pool. + :type pool_id: str + :param scheduling_error: Details of any error encountered by the service + in starting the Job. This property is not set if there was no error + starting the Job. + :type scheduling_error: ~azure.batch.models.JobSchedulingError + :param terminate_reason: A string describing the reason the Job ended. + This property is set only if the Job is in the completed state. If the + Batch service terminates the Job, it sets the reason as follows: + JMComplete - the Job Manager Task completed, and killJobOnCompletion was + set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime + constraint. TerminateJobSchedule - the Job ran as part of a schedule, and + the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete + attribute is set to terminatejob, and all Tasks in the Job are complete. 
+ TaskFailed - the Job's onTaskFailure attribute is set to + performExitOptionsJobAction, and a Task in the Job failed with an exit + condition that specified a jobAction of terminatejob. Any other string is + a user-defined reason specified in a call to the 'Terminate a Job' + operation. + :type terminate_reason: str + """ + + _validation = { + 'start_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'scheduling_error': {'key': 'schedulingError', 'type': 'JobSchedulingError'}, + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, start_time, end_time=None, pool_id: str=None, scheduling_error=None, terminate_reason: str=None, **kwargs) -> None: + super(JobExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.pool_id = pool_id + self.scheduling_error = scheduling_error + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..a8f7e849 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..2092bbd8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_options.py new file mode 100644 index 00000000..62d47959 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_options_py3.py new file mode 100644 index 00000000..9ed21fc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options.py new file mode 100644 index 00000000..603d79ce --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options_py3.py new file mode 100644 index 00000000..b109e59e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_get_task_counts_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobGetTaskCountsOptions(Model): + """Additional parameters for get_task_counts operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobGetTaskCountsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options.py new file mode 100644 index 00000000..3d6a86bd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options_py3.py new file mode 100644 index 00000000..6b9b05e1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_from_job_schedule_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListFromJobScheduleOptions(Model): + """Additional parameters for list_from_job_schedule operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListFromJobScheduleOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_options.py new file mode 100644 index 00000000..6d926dba --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_options_py3.py new file mode 100644 index 00000000..356f33fa --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Jobs can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options.py new file mode 100644 index 00000000..d6607e15 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options_py3.py new file mode 100644 index 00000000..a1332ba8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_list_preparation_and_release_task_status_options_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobListPreparationAndReleaseTaskStatusOptions(Model): + """Additional parameters for list_preparation_and_release_task_status + operation. 
+ + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobListPreparationAndReleaseTaskStatusOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task.py b/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task.py new file mode 100644 index 00000000..fde7d416 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager Task. + + The Job Manager Task is automatically started when the Job is created. The + Batch service tries to schedule the Job Manager Task before any other Tasks + in the Job. 
When shrinking a Pool, the Batch service tries to preserve + Nodes where Job Manager Tasks are running for as long as possible (that is, + Compute Nodes running 'normal' Tasks are removed before Compute Nodes + running Job Manager Tasks). When a Job Manager Task fails and needs to be + restarted, the system tries to schedule it at the highest priority. If + there are no idle Compute Nodes available, the system may terminate one of + the running Tasks in the Pool and return it to the queue in order to make + room for the Job Manager Task to restart. Note that a Job Manager Task in + one Job does not have priority over Tasks in other Jobs. Across Jobs, only + Job level priorities are observed. For example, if a Job Manager in a + priority 0 Job needs to be restarted, it will not displace Tasks of a + priority 1 Job. Batch will retry Tasks when a recovery operation is + triggered on a Node. Examples of recovery operations include (but are not + limited to) when an unhealthy Node is rebooted or a Compute Node + disappeared due to host failure. Retries due to recovery operations are + independent of and are not counted against the maxTaskRetryCount. Even if + the maxTaskRetryCount is 0, an internal retry due to a recovery operation + may occur. Because of this, all Tasks should be idempotent. This means + Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is + to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + Task within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager Task. 
It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager Task runs. If the Pool that will run this Task has + containerConfiguration set, this must be set as well. If the Pool that + will run this Task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all Task environment variables are mapped + into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is a + maximum size for the list of resource files. 
When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager Task + signifies completion of the entire Job. If true, when the Job Manager Task + completes, the Batch service marks the Job as complete. If any Tasks are + still running at this time (other than Job Release), those Tasks are + terminated. If false, the completion of the Job Manager Task does not + affect the Job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the Job, or have a client or + user terminate the Job explicitly. An example of this is if the Job + Manager creates a set of Tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control Job lifetime, + and using the Job Manager Task only to create the Tasks for the Job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. 
+ :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager Task requires exclusive use + of the Compute Node where it runs. If true, no other Tasks will run on the + same Node for as long as the Job Manager is running. If false, other Tasks + can run simultaneously with the Job Manager on a Compute Node. The Job + Manager Task counts normally against the Compute Node's concurrent Task + limit, so this is only relevant if the Compute Node allows multiple + concurrent Tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of Application Packages that + the Batch service will deploy to the Compute Node before running the + command line. Application Packages are downloaded and deployed to a shared + directory, not the Task working directory. Therefore, if a referenced + Application Package is already on the Compute Node, and is up to date, + then it is not re-downloaded; the existing copy on the Compute Node is + used. If a referenced Application Package cannot be installed, for example + because the package has been deleted or because download failed, the Task + fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. 
For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager Task may run on a + low-priority Compute Node. The default value is true. + :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobManagerTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + 
self.environment_settings = kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.kill_job_on_completion = kwargs.get('kill_job_on_completion', None) + self.user_identity = kwargs.get('user_identity', None) + self.run_exclusive = kwargs.get('run_exclusive', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) + self.allow_low_priority_node = kwargs.get('allow_low_priority_node', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task_py3.py new file mode 100644 index 00000000..cbcb37aa --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_manager_task_py3.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobManagerTask(Model): + """Specifies details of a Job Manager Task. + + The Job Manager Task is automatically started when the Job is created. The + Batch service tries to schedule the Job Manager Task before any other Tasks + in the Job. When shrinking a Pool, the Batch service tries to preserve + Nodes where Job Manager Tasks are running for as long as possible (that is, + Compute Nodes running 'normal' Tasks are removed before Compute Nodes + running Job Manager Tasks). 
When a Job Manager Task fails and needs to be + restarted, the system tries to schedule it at the highest priority. If + there are no idle Compute Nodes available, the system may terminate one of + the running Tasks in the Pool and return it to the queue in order to make + room for the Job Manager Task to restart. Note that a Job Manager Task in + one Job does not have priority over Tasks in other Jobs. Across Jobs, only + Job level priorities are observed. For example, if a Job Manager in a + priority 0 Job needs to be restarted, it will not displace Tasks of a + priority 1 Job. Batch will retry Tasks when a recovery operation is + triggered on a Node. Examples of recovery operations include (but are not + limited to) when an unhealthy Node is rebooted or a Compute Node + disappeared due to host failure. Retries due to recovery operations are + independent of and are not counted against the maxTaskRetryCount. Even if + the maxTaskRetryCount is 0, an internal retry due to a recovery operation + may occur. Because of this, all Tasks should be idempotent. This means + Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is + to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Job Manager + Task within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. + :type id: str + :param display_name: The display name of the Job Manager Task. It need not + be unique and can contain any Unicode characters up to a maximum length of + 1024. + :type display_name: str + :param command_line: Required. The command line of the Job Manager Task. 
+ The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Manager Task runs. If the Pool that will run this Task has + containerConfiguration set, this must be set as well. If the Pool that + will run this Task doesn't have containerConfiguration set, this must not + be set. When this is specified, all directories recursively below the + AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) + are mapped into the container, all Task environment variables are mapped + into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. 
+ :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Job Manager Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Manager Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param kill_job_on_completion: Whether completion of the Job Manager Task + signifies completion of the entire Job. If true, when the Job Manager Task + completes, the Batch service marks the Job as complete. If any Tasks are + still running at this time (other than Job Release), those Tasks are + terminated. If false, the completion of the Job Manager Task does not + affect the Job status. In this case, you should either use the + onAllTasksComplete attribute to terminate the Job, or have a client or + user terminate the Job explicitly. An example of this is if the Job + Manager creates a set of Tasks but then takes no further role in their + execution. The default value is true. If you are using the + onAllTasksComplete and onTaskFailure attributes to control Job lifetime, + and using the Job Manager Task only to create the Tasks for the Job (not + to monitor progress), then it is important to set killJobOnCompletion to + false. + :type kill_job_on_completion: bool + :param user_identity: The user identity under which the Job Manager Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param run_exclusive: Whether the Job Manager Task requires exclusive use + of the Compute Node where it runs. 
If true, no other Tasks will run on the + same Node for as long as the Job Manager is running. If false, other Tasks + can run simultaneously with the Job Manager on a Compute Node. The Job + Manager Task counts normally against the Compute Node's concurrent Task + limit, so this is only relevant if the Compute Node allows multiple + concurrent Tasks. The default value is true. + :type run_exclusive: bool + :param application_package_references: A list of Application Packages that + the Batch service will deploy to the Compute Node before running the + command line. Application Packages are downloaded and deployed to a shared + directory, not the Task working directory. Therefore, if a referenced + Application Package is already on the Compute Node, and is up to date, + then it is not re-downloaded; the existing copy on the Compute Node is + used. If a referenced Application Package cannot be installed, for example + because the package has been deleted or because download failed, the Task + fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + :param allow_low_priority_node: Whether the Job Manager Task may run on a + low-priority Compute Node. The default value is true. 
+ :type allow_low_priority_node: bool + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'kill_job_on_completion': {'key': 'killJobOnCompletion', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'run_exclusive': {'key': 'runExclusive', 'type': 'bool'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + 'allow_low_priority_node': {'key': 'allowLowPriorityNode', 'type': 'bool'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, constraints=None, kill_job_on_completion: bool=None, user_identity=None, run_exclusive: bool=None, application_package_references=None, authentication_token_settings=None, allow_low_priority_node: bool=None, **kwargs) -> None: + super(JobManagerTask, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.constraints = constraints + self.kill_job_on_completion = 
kill_job_on_completion + self.user_identity = user_identity + self.run_exclusive = run_exclusive + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings + self.allow_low_priority_node = allow_low_priority_node diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration.py new file mode 100644 index 00000000..9a566c67 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the Job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which Compute Nodes running Tasks from the Job will join + for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same + region and subscription as the Azure Batch Account. The specified subnet + should have enough free IP addresses to accommodate the number of Compute + Nodes which will run Tasks from the Job. This can be up to the number of + Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' service principal + must have the 'Classic Virtual Machine Contributor' Role-Based Access + Control (RBAC) role for the specified VNet so that Azure Batch service can + schedule Tasks on the Nodes. This can be verified by checking if the + specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + If the specified VNet has any associated Network Security Groups (NSG), + then a few reserved system ports must be enabled for inbound communication + from the Azure Batch service. For Pools created with a Virtual Machine + configuration, enable ports 29876 and 29877, as well as port 22 for Linux + and port 3389 for Windows. Port 443 is also required to be open for + outbound connections for communications to Azure Storage. For more details + see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration_py3.py new file mode 100644 index 00000000..dfea024a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_network_configuration_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the Job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which Compute Nodes running Tasks from the Job will join + for the duration of the Task. This will only work with a + VirtualMachineConfiguration Pool. The virtual network must be in the same + region and subscription as the Azure Batch Account. The specified subnet + should have enough free IP addresses to accommodate the number of Compute + Nodes which will run Tasks from the Job. This can be up to the number of + Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal + must have the 'Classic Virtual Machine Contributor' Role-Based Access + Control (RBAC) role for the specified VNet so that Azure Batch service can + schedule Tasks on the Nodes. This can be verified by checking if the + specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + If the specified VNet has any associated Network Security Groups (NSG), + then a few reserved system ports must be enabled for inbound communication + from the Azure Batch service. 
For Pools created with a Virtual Machine + configuration, enable ports 29876 and 29877, as well as port 22 for Linux + and port 3389 for Windows. Port 443 is also required to be open for + outbound connections for communications to Azure Storage. For more details + see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, *, subnet_id: str, **kwargs) -> None: + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options.py new file mode 100644 index 00000000..9fdbb4f3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options_py3.py new file mode 100644 index 00000000..586e381d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter.py new file mode 100644 index 00000000..db90a7aa --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a Job. 
+ + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the Job is left unchanged. + :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic Job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the Job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The Pool on which the Batch service runs the Job's + Tasks. You may change the Pool for a Job only when the Job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + Job is not disabled. If you specify an autoPoolSpecification in the + poolInfo, only the keepAlive property of the autoPoolSpecification can be + updated, and then only if the autoPoolSpecification has a + poolLifetimeOption of Job (other job properties can be updated as normal). + If omitted, the Job continues to run on its current Pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, the existing Job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter_py3.py new file mode 100644 index 00000000..c8d7f349 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_patch_parameter_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPatchParameter(Model): + """The set of changes to be made to a Job. + + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, the priority of the Job is left unchanged. 
+ :type priority: int + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is left unchanged. You may not change the value from + terminatejob to noaction - that is, once you have engaged automatic Job + termination, you cannot turn it off again. If you try to do this, the + request fails with an 'invalid property value' error response; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + Possible values include: 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param constraints: The execution constraints for the Job. If omitted, the + existing execution constraints are left unchanged. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: The Pool on which the Batch service runs the Job's + Tasks. You may change the Pool for a Job only when the Job is disabled. + The Patch Job call will fail if you include the poolInfo element and the + Job is not disabled. If you specify an autoPoolSpecification in the + poolInfo, only the keepAlive property of the autoPoolSpecification can be + updated, and then only if the autoPoolSpecification has a + poolLifetimeOption of Job (other job properties can be updated as normal). + If omitted, the Job continues to run on its current Pool. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, the existing Job metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, priority: int=None, on_all_tasks_complete=None, constraints=None, pool_info=None, metadata=None, **kwargs) -> None: + super(JobPatchParameter, self).__init__(**kwargs) + self.priority = priority + self.on_all_tasks_complete = on_all_tasks_complete + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information.py new file mode 100644 index 00000000..21fd6c88 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release Tasks on a Compute Node. + + :param pool_id: The ID of the Pool containing the Compute Node to which + this entry refers. 
+ :type pool_id: str + :param node_id: The ID of the Compute Node to which this entry refers. + :type node_id: str + :param node_url: The URL of the Compute Node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation Task on this Compute Node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release Task on this Compute Node. This property is set + only if the Job Release Task has run on the Compute Node. + :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.node_id = kwargs.get('node_id', None) + self.node_url = kwargs.get('node_url', None) + self.job_preparation_task_execution_info = kwargs.get('job_preparation_task_execution_info', None) + self.job_release_task_execution_info = kwargs.get('job_release_task_execution_info', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_paged.py new file mode 100644 index 00000000..f1f7d3c4 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class JobPreparationAndReleaseTaskExecutionInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobPreparationAndReleaseTaskExecutionInformation <azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobPreparationAndReleaseTaskExecutionInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(JobPreparationAndReleaseTaskExecutionInformationPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_py3.py new file mode 100644 index 00000000..8c998d37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_and_release_task_execution_information_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated.
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationAndReleaseTaskExecutionInformation(Model): + """The status of the Job Preparation and Job Release Tasks on a Compute Node. + + :param pool_id: The ID of the Pool containing the Compute Node to which + this entry refers. + :type pool_id: str + :param node_id: The ID of the Compute Node to which this entry refers. + :type node_id: str + :param node_url: The URL of the Compute Node to which this entry refers. + :type node_url: str + :param job_preparation_task_execution_info: Information about the + execution status of the Job Preparation Task on this Compute Node. + :type job_preparation_task_execution_info: + ~azure.batch.models.JobPreparationTaskExecutionInformation + :param job_release_task_execution_info: Information about the execution + status of the Job Release Task on this Compute Node. This property is set + only if the Job Release Task has run on the Compute Node. 
+ :type job_release_task_execution_info: + ~azure.batch.models.JobReleaseTaskExecutionInformation + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'node_id': {'key': 'nodeId', 'type': 'str'}, + 'node_url': {'key': 'nodeUrl', 'type': 'str'}, + 'job_preparation_task_execution_info': {'key': 'jobPreparationTaskExecutionInfo', 'type': 'JobPreparationTaskExecutionInformation'}, + 'job_release_task_execution_info': {'key': 'jobReleaseTaskExecutionInfo', 'type': 'JobReleaseTaskExecutionInformation'}, + } + + def __init__(self, *, pool_id: str=None, node_id: str=None, node_url: str=None, job_preparation_task_execution_info=None, job_release_task_execution_info=None, **kwargs) -> None: + super(JobPreparationAndReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.node_id = node_id + self.node_url = node_url + self.job_preparation_task_execution_info = job_preparation_task_execution_info + self.job_release_task_execution_info = job_release_task_execution_info diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task.py new file mode 100644 index 00000000..158747f4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation Task to run before any Tasks of the Job on any given + Compute Node. 
+ + You can use Job Preparation to prepare a Node to run Tasks for the Job. + Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the Tasks in the Job. The Job Preparation + Task can download these common resource files to the shared location on the + Node. (AZ_BATCH_NODE_ROOT_DIR\\shared), or starting a local service on the + Node so that all Tasks of that Job can communicate with it. If the Job + Preparation Task fails (that is, exhausts its retry count before exiting + with exit code 0), Batch will not run Tasks of this Job on the Node. The + Compute Node remains ineligible to run Tasks of this Job until it is + reimaged. The Compute Node remains active and can be used for other Jobs. + The Job Preparation Task can run multiple times on the same Node. + Therefore, you should write the Job Preparation Task to handle + re-execution. If the Node is rebooted, the Job Preparation Task is run + again on the Compute Node before scheduling any other Task of the Job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did + not previously complete. If the Node is reimaged, the Job Preparation Task + is run again before scheduling any Task of the Job. Batch will retry Tasks + when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is + rebooted or a Compute Node disappeared due to host failure. Retries due to + recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry + due to a recovery operation may occur. Because of this, all Tasks should be + idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best + practice for long running Tasks is to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure.
+ + :param id: A string that uniquely identifies the Job Preparation Task + within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other Task in the Job can + have the same ID as the Job Preparation Task. If you try to submit a Task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + Task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation Task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command + line is executed in the container. Files produced in the container outside + of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. 
+ :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation Task. + :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation Task to complete successfully before scheduling any other + Tasks of the Job on the Compute Node. A Job Preparation Task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + Task fails on a Node, the Batch service retries the Job Preparation Task + up to its maximum retry count (as specified in the constraints element). + If the Task has still not completed successfully after all retries, then + the Batch service will not schedule Tasks of the Job to the Node. The Node + remains active and eligible to run Tasks of other Jobs. If false, the + Batch service will not wait for the Job Preparation Task to complete. In + this case, other Tasks of the Job can start executing on the Compute Node + while the Job Preparation Task is still running; and even if the Job + Preparation Task fails, new Tasks will continue to be scheduled on the + Compute Node. 
The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + Task runs. If omitted, the Task runs as a non-administrative user unique + to the Task on Windows Compute Nodes, or a non-administrative user unique + to the Pool on Linux Compute Nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation Task after a Compute Node reboots. The + Job Preparation Task is always rerun if a Compute Node is reimaged, or if + the Job Preparation Task did not complete (e.g. because the reboot + occurred while the Task was running). Therefore, you should always write a + Job Preparation Task to be idempotent and to behave correctly if run + multiple times. The default value is true. + :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = 
kwargs.get('environment_settings', None) + self.constraints = kwargs.get('constraints', None) + self.wait_for_success = kwargs.get('wait_for_success', None) + self.user_identity = kwargs.get('user_identity', None) + self.rerun_on_node_reboot_after_success = kwargs.get('rerun_on_node_reboot_after_success', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information.py new file mode 100644 index 00000000..3e1edeff --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation Task on a + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Preparation Task completed. + This property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation Task on + the Compute Node. 
Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation Task + on the Compute Node. You can use this path to retrieve files created by + the Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. 
Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will + retry the Task up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation Task started running. This property is set only if the Task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the Task has been restarted + for reasons other than retry; for example, if the Compute Node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobPreparationTaskExecutionInformation, 
self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information_py3.py new file mode 100644 index 00000000..373d5b23 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_execution_information_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTaskExecutionInformation(Model): + """Contains information about the execution of a Job Preparation Task on a + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. 
+ :type start_time: datetime + :param end_time: The time at which the Job Preparation Task completed. + This property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Preparation Task on + the Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobPreparationTaskState + :param task_root_directory: The root directory of the Job Preparation Task + on the Compute Node. You can use this path to retrieve files created by + the Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Preparation Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. 
Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. Task application failures + (non-zero exit code) are retried, pre-processing errors (the Task could + not be run) and file upload errors are not retried. The Batch service will + retry the Task up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Job + Preparation Task started running. This property is set only if the Task + was retried (i.e. retryCount is nonzero). If present, this is typically + the same as startTime, but may be different if the Task has been restarted + for reasons other than retry; for example, if the Compute Node was + rebooted during a retry, then the startTime is updated but the + lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobPreparationTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, retry_count: int, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(JobPreparationTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_py3.py new file mode 100644 index 00000000..53dc875d --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/job_preparation_task_py3.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobPreparationTask(Model): + """A Job Preparation Task to run before any Tasks of the Job on any given + Compute Node. + + You can use Job Preparation to prepare a Node to run Tasks for the Job. + Activities commonly performed in Job Preparation include: Downloading + common resource files used by all the Tasks in the Job. The Job Preparation + Task can download these common resource files to the shared location on the + Node. (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the + Node so that all Tasks of that Job can communicate with it. If the Job + Preparation Task fails (that is, exhausts its retry count before exiting + with exit code 0), Batch will not run Tasks of this Job on the Node. The + Compute Node remains ineligible to run Tasks of this Job until it is + reimaged. The Compute Node remains active and can be used for other Jobs. + The Job Preparation Task can run multiple times on the same Node. + Therefore, you should write the Job Preparation Task to handle + re-execution. If the Node is rebooted, the Job Preparation Task is run + again on the Compute Node before scheduling any other Task of the Job, if + rerunOnNodeRebootAfterSuccess is true or if the Job Preparation Task did + not previously complete. If the Node is reimaged, the Job Preparation Task + is run again before scheduling any Task of the Job. 
Batch will retry Tasks + when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is + rebooted or a Compute Node disappeared due to host failure. Retries due to + recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry + due to a recovery operation may occur. Because of this, all Tasks should be + idempotent. This means Tasks need to tolerate being interrupted and + restarted without causing any corruption or duplicate data. The best + practice for long running Tasks is to use some form of checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Preparation Task + within the Job. The ID can contain any combination of alphanumeric + characters including hyphens and underscores and cannot contain more than + 64 characters. If you do not specify this property, the Batch service + assigns a default value of 'jobpreparation'. No other Task in the Job can + have the same ID as the Job Preparation Task. If you try to submit a Task + with the same id, the Batch service rejects the request with error code + TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, + the HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Preparation + Task. The command line does not run under a shell, and therefore cannot + take advantage of shell features such as environment variable expansion. + If you want to take advantage of such features, you should invoke the + shell in the command line, for example using "cmd /c MyCommand" in Windows + or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file + paths, it should use a relative path (relative to the Task working + directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Preparation Task runs. When this is specified, all directories + recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch + directories on the node) are mapped into the container, all Task + environment variables are mapped into the container, and the Task command + line is executed in the container. Files produced in the container outside + of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. Files listed + under this element are located in the Task's working directory. There is + a maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Preparation Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param constraints: Constraints that apply to the Job Preparation Task. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param wait_for_success: Whether the Batch service should wait for the Job + Preparation Task to complete successfully before scheduling any other + Tasks of the Job on the Compute Node. A Job Preparation Task has completed + successfully if it exits with exit code 0. If true and the Job Preparation + Task fails on a Node, the Batch service retries the Job Preparation Task + up to its maximum retry count (as specified in the constraints element). + If the Task has still not completed successfully after all retries, then + the Batch service will not schedule Tasks of the Job to the Node. The Node + remains active and eligible to run Tasks of other Jobs. If false, the + Batch service will not wait for the Job Preparation Task to complete. In + this case, other Tasks of the Job can start executing on the Compute Node + while the Job Preparation Task is still running; and even if the Job + Preparation Task fails, new Tasks will continue to be scheduled on the + Compute Node. The default value is true. + :type wait_for_success: bool + :param user_identity: The user identity under which the Job Preparation + Task runs. If omitted, the Task runs as a non-administrative user unique + to the Task on Windows Compute Nodes, or a non-administrative user unique + to the Pool on Linux Compute Nodes. + :type user_identity: ~azure.batch.models.UserIdentity + :param rerun_on_node_reboot_after_success: Whether the Batch service + should rerun the Job Preparation Task after a Compute Node reboots. The + Job Preparation Task is always rerun if a Compute Node is reimaged, or if + the Job Preparation Task did not complete (e.g. because the reboot + occurred while the Task was running). Therefore, you should always write a + Job Preparation Task to be idempotent and to behave correctly if run + multiple times. The default value is true. 
+ :type rerun_on_node_reboot_after_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'rerun_on_node_reboot_after_success': {'key': 'rerunOnNodeRebootAfterSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, constraints=None, wait_for_success: bool=None, user_identity=None, rerun_on_node_reboot_after_success: bool=None, **kwargs) -> None: + super(JobPreparationTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.constraints = constraints + self.wait_for_success = wait_for_success + self.user_identity = user_identity + self.rerun_on_node_reboot_after_success = rerun_on_node_reboot_after_success diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_release_task.py b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task.py new file mode 100644 index 00000000..b68410a1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release Task to run on Job completion on any Compute Node where the + Job has run. + + The Job Release Task runs when the Job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the Job is still active, the Job's maximum wall clock time constraint + is reached, and the Job is still active, or the Job's Job Manager Task + completed, and the Job is configured to terminate when the Job Manager + completes. The Job Release Task runs on each Node where Tasks of the Job + have run and the Job Preparation Task ran and completed. If you reimage a + Node after it has run the Job Preparation Task, and the Job ends without + any further Tasks of the Job running on that Node (and hence the Job + Preparation Task does not re-run), then the Job Release Task does not run + on that Compute Node. If a Node reboots while the Job Release Task is still + running, the Job Release Task runs again when the Compute Node starts up. + The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy + a scheduling slot; that is, it does not count towards the maxTasksPerNode + limit specified on the Pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release Task within + the Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. 
If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. No other Task in the Job can have the + same ID as the Job Release Task. If you try to submit a Task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release Task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. 
When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + Task may run on a given Compute Node, measured from the time the Task + starts. If the Task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory for + the Job Release Task on the Compute Node. After this time, the Batch + service may delete the Task directory and all its contents. The default is + 7 days, i.e. the Task directory will be retained for 7 days unless the + Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTask, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.user_identity = kwargs.get('user_identity', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information.py new file mode 100644 index 00000000..0cb6fc16 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release Task on a Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release Task completed. This + property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release Task on the + Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release Task on + the Compute Node. You can use this path to retrieve files created by the + Task, such as log files. + :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. 
+ :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.state = kwargs.get('state', None) + self.task_root_directory = kwargs.get('task_root_directory', None) + self.task_root_directory_url = kwargs.get('task_root_directory_url', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + 
self.failure_info = kwargs.get('failure_info', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information_py3.py new file mode 100644 index 00000000..f83c2895 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_execution_information_py3.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTaskExecutionInformation(Model): + """Contains information about the execution of a Job Release Task on a Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The time at which the Task started running. + If the Task has been restarted or retried, this is the most recent time at + which the Task started running. + :type start_time: datetime + :param end_time: The time at which the Job Release Task completed. This + property is set only if the Task is in the Completed state. + :type end_time: datetime + :param state: Required. The current state of the Job Release Task on the + Compute Node. Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.JobReleaseTaskState + :param task_root_directory: The root directory of the Job Release Task on + the Compute Node. You can use this path to retrieve files created by the + Task, such as log files. 
+ :type task_root_directory: str + :param task_root_directory_url: The URL to the root directory of the Job + Release Task on the Compute Node. + :type task_root_directory_url: str + :param exit_code: The exit code of the program specified on the Task + command line. This parameter is returned only if the Task is in the + completed state. The exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. Note + that the exit code may also be generated by the Compute Node operating + system, such as when a process is forcibly terminated. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'start_time': {'required': True}, + 'state': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobReleaseTaskState'}, + 'task_root_directory': {'key': 'taskRootDirectory', 'type': 'str'}, + 'task_root_directory_url': {'key': 'taskRootDirectoryUrl', 'type': 'str'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, start_time, state, end_time=None, task_root_directory: str=None, task_root_directory_url: str=None, exit_code: int=None, container_info=None, failure_info=None, result=None, **kwargs) -> None: + super(JobReleaseTaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.state = state + self.task_root_directory = task_root_directory + self.task_root_directory_url = task_root_directory_url + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.result = result diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_py3.py new file mode 100644 index 00000000..70e7615c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_release_task_py3.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobReleaseTask(Model): + """A Job Release Task to run on Job completion on any Compute Node where the + Job has run. + + The Job Release Task runs when the Job ends, because of one of the + following: The user calls the Terminate Job API, or the Delete Job API + while the Job is still active, the Job's maximum wall clock time constraint + is reached, and the Job is still active, or the Job's Job Manager Task + completed, and the Job is configured to terminate when the Job Manager + completes. The Job Release Task runs on each Node where Tasks of the Job + have run and the Job Preparation Task ran and completed. If you reimage a + Node after it has run the Job Preparation Task, and the Job ends without + any further Tasks of the Job running on that Node (and hence the Job + Preparation Task does not re-run), then the Job Release Task does not run + on that Compute Node. If a Node reboots while the Job Release Task is still + running, the Job Release Task runs again when the Compute Node starts up. + The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy + a scheduling slot; that is, it does not count towards the maxTasksPerNode + limit specified on the Pool. + + All required parameters must be populated in order to send to Azure. + + :param id: A string that uniquely identifies the Job Release Task within + the Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores and cannot contain more than 64 + characters. If you do not specify this property, the Batch service assigns + a default value of 'jobrelease'. 
No other Task in the Job can have the + same ID as the Job Release Task. If you try to submit a Task with the same + id, the Batch service rejects the request with error code + TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the + HTTP status code is 409 (Conflict). + :type id: str + :param command_line: Required. The command line of the Job Release Task. + The command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Job Release Task runs. When this is specified, all directories recursively + below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on + the node) are mapped into the container, all Task environment variables + are mapped into the container, and the Task command line is executed in + the container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. 
If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the Job Release Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param max_wall_clock_time: The maximum elapsed time that the Job Release + Task may run on a given Compute Node, measured from the time the Task + starts. If the Task does not complete within the time limit, the Batch + service terminates it. The default value is 15 minutes. You may not + specify a timeout longer than 15 minutes. If you do, the Batch service + rejects it with an error; if you are calling the REST API directly, the + HTTP status code is 400 (Bad Request). + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory for + the Job Release Task on the Compute Node. After this time, the Batch + service may delete the Task directory and all its contents. The default is + 7 days, i.e. the Task directory will be retained for 7 days unless the + Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param user_identity: The user identity under which the Job Release Task + runs. If omitted, the Task runs as a non-administrative user unique to the + Task. 
+ :type user_identity: ~azure.batch.models.UserIdentity + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + } + + def __init__(self, *, command_line: str, id: str=None, container_settings=None, resource_files=None, environment_settings=None, max_wall_clock_time=None, retention_time=None, user_identity=None, **kwargs) -> None: + super(JobReleaseTask, self).__init__(**kwargs) + self.id = id + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.user_identity = user_identity diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options.py new file mode 100644 index 00000000..6c03aaff --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options_py3.py new file mode 100644 index 00000000..fe7b76cc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter.py new file mode 100644 index 00000000..3a38b677 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the schedule within + the Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which Jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the Jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter_py3.py new file mode 100644 index 00000000..fe16af9b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_add_parameter_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleAddParameter(Model): + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs + and a specification used to create each Job. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
A string that uniquely identifies the schedule within + the Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the schedule. The display name + need not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param schedule: Required. The schedule according to which Jobs will be + created. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. The details of the Jobs to be created + on this schedule. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the schedule + as metadata. The Batch service does not assign any meaning to metadata; it + is solely for the use of user code. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'id': {'required': True}, + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, id: str, schedule, job_specification, display_name: str=None, metadata=None, **kwargs) -> None: + super(JobScheduleAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options.py new file mode 100644 index 00000000..a7e01118 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options_py3.py new file mode 100644 index 00000000..89ae9986 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options.py new file mode 100644 index 00000000..9384c1fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options_py3.py new file mode 100644 index 00000000..83adbe53 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_disable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleDisableOptions(Model): + """Additional parameters for disable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleDisableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options.py new file mode 100644 index 00000000..a296d530 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options_py3.py new file mode 100644 index 00000000..daa4d087 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_enable_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleEnableOptions(Model): + """Additional parameters for enable operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleEnableOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information.py new file mode 100644 index 00000000..1e38ec3d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about Jobs that have been and will be run under a Job + Schedule. + + :param next_run_time: The next time at which a Job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no Job will be created at nextRunTime unless the Job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent Job under the Job + Schedule. This property is present only if at least one Job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the Job Schedule is in the completed state. + :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = kwargs.get('next_run_time', None) + self.recent_job = kwargs.get('recent_job', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information_py3.py new file mode 100644 index 00000000..bfc1afc3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_execution_information_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExecutionInformation(Model): + """Contains information about Jobs that have been and will be run under a Job + Schedule. + + :param next_run_time: The next time at which a Job will be created under + this schedule. This property is meaningful only if the schedule is in the + active state when the time comes around. For example, if the schedule is + disabled, no Job will be created at nextRunTime unless the Job is enabled + before then. + :type next_run_time: datetime + :param recent_job: Information about the most recent Job under the Job + Schedule. This property is present only if at least one Job has run + under the schedule. + :type recent_job: ~azure.batch.models.RecentJob + :param end_time: The time at which the schedule ended. This property is + set only if the Job Schedule is in the completed state. 
+ :type end_time: datetime + """ + + _attribute_map = { + 'next_run_time': {'key': 'nextRunTime', 'type': 'iso-8601'}, + 'recent_job': {'key': 'recentJob', 'type': 'RecentJob'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, next_run_time=None, recent_job=None, end_time=None, **kwargs) -> None: + super(JobScheduleExecutionInformation, self).__init__(**kwargs) + self.next_run_time = next_run_time + self.recent_job = recent_job + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options.py new file mode 100644 index 00000000..c4f228d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options_py3.py new file mode 100644 index 00000000..da8e15d2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options.py new file mode 100644 index 00000000..434b0ab1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options_py3.py new file mode 100644 index 00000000..11ee540f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options.py new file mode 100644 index 00000000..4778b41e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Job Schedules can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options_py3.py new file mode 100644 index 00000000..834a0af2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Job Schedules can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(JobScheduleListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options.py new file mode 100644 index 00000000..841e56e9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. 
The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobSchedulePatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options_py3.py new file mode 100644 index 00000000..06e4f626 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobSchedulePatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter.py new file mode 100644 index 00000000..f0cbdf2f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchParameter(Model): + """The set of changes to be made to a Job Schedule. + + :param schedule: The schedule according to which Jobs will be created. If + you do not specify this element, the existing schedule is left unchanged. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. Updates affect only Jobs that are started after the update has + taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, existing + metadata is left unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobSchedulePatchParameter, self).__init__(**kwargs) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter_py3.py new file mode 100644 index 00000000..a9694ade --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_patch_parameter_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulePatchParameter(Model): + """The set of changes to be made to a Job Schedule. + + :param schedule: The schedule according to which Jobs will be created. If + you do not specify this element, the existing schedule is left unchanged. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: The details of the Jobs to be created on this + schedule. Updates affect only Jobs that are started after the update has + taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, existing + metadata is left unchanged. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule=None, job_specification=None, metadata=None, **kwargs) -> None: + super(JobSchedulePatchParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics.py new file mode 100644 index 00000000..ea33b382 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. 
The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + Tasks in all the Jobs created under the schedule. The wall clock time is + the elapsed time from when the Task started running on a Compute Node to + when it finished (or to the last time the statistics were updated, if the + Task had not finished by then). If a Task was retried, this includes the + wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in all Jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in all Jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + Tasks in all Jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + Tasks in all Jobs created under the schedule. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed during the given time range in Jobs created under + the schedule. A Task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks that failed + during the given time range in Jobs created under the schedule. A Task + fails if it exhausts its maximum retry count without returning exit code + 0. 
+ :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all Tasks in all Jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in all Jobs + created under the schedule. The wait time for a Task is defined as the + elapsed time between the creation of the Task and the start of Task + execution. (If the Task is retried due to failures, the wait time is the + time to the most recent Task execution.). This value is only reported in + the Account lifetime statistics; it is not included in the Job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, 
+ 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics_py3.py new file mode 100644 index 00000000..15bdc089 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_statistics_py3.py @@ -0,0 +1,126 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleStatistics(Model): + """Resource usage statistics for a Job Schedule. 
+ + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in all Jobs + created under the schedule. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all the + Tasks in all the Jobs created under the schedule. The wall clock time is + the elapsed time from when the Task started running on a Compute Node to + when it finished (or to the last time the statistics were updated, if the + Task had not finished by then). If a Task was retried, this includes the + wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in all Jobs created under the schedule. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in all Jobs created under the schedule. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by all + Tasks in all Jobs created under the schedule. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by all + Tasks in all Jobs created under the schedule. 
+ :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed during the given time range in Jobs created under + the schedule. A Task completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks that failed + during the given time range in Jobs created under the schedule. A Task + fails if it exhausts its maximum retry count without returning exit code + 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries during the + given time range on all Tasks in all Jobs created under the schedule. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in all Jobs + created under the schedule. The wait time for a Task is defined as the + elapsed time between the creation of the Task and the start of Task + execution. (If the Task is retried due to failures, the wait time is the + time to the most recent Task execution.). This value is only reported in + the Account lifetime statistics; it is not included in the Job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobScheduleStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + 
self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options.py new file mode 100644 index 00000000..32a6f0d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. 
Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options_py3.py new file mode 100644 index 00000000..54789876 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options.py new file mode 100644 index 00000000..ca3de898 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobScheduleUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options_py3.py new file mode 100644 index 00000000..aee92988 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobScheduleUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter.py new file mode 100644 index 00000000..bb01f620 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateParameter(Model): + """The set of changes to be made to a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule: Required. The schedule according to which Jobs will be + created. If you do not specify this element, it is equivalent to passing + the default schedule: that is, a single Job scheduled to run immediately. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. Details of the Jobs to be created on + this schedule. Updates affect only Jobs that are started after the update + has taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, it takes the + default value of an empty list; in effect, any existing metadata is + deleted. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobScheduleUpdateParameter, self).__init__(**kwargs) + self.schedule = kwargs.get('schedule', None) + self.job_specification = kwargs.get('job_specification', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter_py3.py new file mode 100644 index 00000000..40195e0f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_schedule_update_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobScheduleUpdateParameter(Model): + """The set of changes to be made to a Job Schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule: Required. The schedule according to which Jobs will be + created. If you do not specify this element, it is equivalent to passing + the default schedule: that is, a single Job scheduled to run immediately. + :type schedule: ~azure.batch.models.Schedule + :param job_specification: Required. 
Details of the Jobs to be created on + this schedule. Updates affect only Jobs that are started after the update + has taken place. Any currently active Job continues with the older + specification. + :type job_specification: ~azure.batch.models.JobSpecification + :param metadata: A list of name-value pairs associated with the Job + Schedule as metadata. If you do not specify this element, it takes the + default value of an empty list; in effect, any existing metadata is + deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'schedule': {'required': True}, + 'job_specification': {'required': True}, + } + + _attribute_map = { + 'schedule': {'key': 'schedule', 'type': 'Schedule'}, + 'job_specification': {'key': 'jobSpecification', 'type': 'JobSpecification'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, schedule, job_specification, metadata=None, **kwargs) -> None: + super(JobScheduleUpdateParameter, self).__init__(**kwargs) + self.schedule = schedule + self.job_specification = job_specification + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error.py b/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error.py new file mode 100644 index 00000000..ff82e783 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulingError(Model): + """An error encountered by the Batch service when scheduling a Job. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Job scheduling error. + Possible values include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Job scheduling error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Job scheduling error, intended to + be suitable for display in a user interface. + :type message: str + :param details: A list of additional error details related to the + scheduling error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(JobSchedulingError, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error_py3.py new file mode 100644 index 00000000..2d635a17 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_scheduling_error_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSchedulingError(Model): + """An error encountered by the Batch service when scheduling a Job. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Job scheduling error. + Possible values include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Job scheduling error. Codes are + invariant and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Job scheduling error, intended to + be suitable for display in a user interface. + :type message: str + :param details: A list of additional error details related to the + scheduling error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(JobSchedulingError, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_specification.py b/azext/generated/sdk/batch/v2019_08_01/models/job_specification.py new file mode 100644 index 00000000..5cbe16fc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_specification.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSpecification(Model): + """Specifies details of the Jobs to be created on a schedule. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of Jobs created under this schedule. + Priority values can range from -1000 to 1000, with -1000 being the lowest + priority and 1000 being the highest priority. The default value is 0. This + priority is used as the default for all Jobs under the Job Schedule. You + can update a Job's priority after it has been created by using the + update Job API. 
+ :type priority: int + :param display_name: The display name for Jobs created under this + schedule. The name need not be unique and can contain any Unicode + characters up to a maximum length of 1024. + :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in a Job created under this schedule are in the completed + state. Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic Job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the Job + properties to set onAllTasksComplete to terminatejob once you have + finished adding Tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task fails in a Job created under this schedule. A Task is considered to + have failed if it has a failureInfo. A failureInfo is set + if the Task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the Task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param constraints: The execution constraints for Jobs created under this + schedule. 
+ :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager Task to be launched + when a Job is started under this schedule. If the Job does not specify a + Job Manager Task, the user must explicitly add Tasks to the Job using the + Task API. If the Job does specify a Job Manager Task, the Batch service + creates the Job Manager Task when the Job is created, and will try to + schedule the Job Manager Task before scheduling other Tasks in the Job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task for Jobs created + under this schedule. If a Job has a Job Preparation Task, the Batch + service will run the Job Preparation Task on a Node before starting any + Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task for Jobs created under this + schedule. The primary purpose of the Job Release Task is to undo changes + to Nodes made by the Job Preparation Task. Example activities include + deleting local files, or shutting down services that were started as part + of Job preparation. A Job Release Task cannot be specified without also + specifying a Job Preparation Task for the Job. The Batch service runs the + Job Release Task on the Compute Nodes that have run the Job Preparation + Task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all Tasks in Jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release Tasks). Individual Tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. 
The Pool on which the Batch service runs the + Tasks of Jobs created under this schedule. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each Job + created under this schedule as metadata. The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(JobSpecification, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.display_name = kwargs.get('display_name', None) + self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) + self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.constraints = kwargs.get('constraints', None) 
+ self.job_manager_task = kwargs.get('job_manager_task', None) + self.job_preparation_task = kwargs.get('job_preparation_task', None) + self.job_release_task = kwargs.get('job_release_task', None) + self.common_environment_settings = kwargs.get('common_environment_settings', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_specification_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_specification_py3.py new file mode 100644 index 00000000..b9793eea --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_specification_py3.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobSpecification(Model): + """Specifies details of the Jobs to be created on a schedule. + + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of Jobs created under this schedule. + Priority values can range from -1000 to 1000, with -1000 being the lowest + priority and 1000 being the highest priority. The default value is 0. This + priority is used as the default for all Jobs under the Job Schedule. You + can update a Job's priority after it has been created by using the + update Job API. + :type priority: int + :param display_name: The display name for Jobs created under this + schedule. The name need not be unique and can contain any Unicode + characters up to a maximum length of 1024. 
+ :type display_name: str + :param uses_task_dependencies: Whether Tasks in the Job can define + dependencies on each other. The default is false. + :type uses_task_dependencies: bool + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in a Job created under this schedule are in the completed + state. Note that if a Job contains no Tasks, then all Tasks are considered + complete. This option is therefore most commonly used with a Job Manager + task; if you want to use automatic Job termination without a Job Manager, + you should initially set onAllTasksComplete to noaction and update the Job + properties to set onAllTasksComplete to terminatejob once you have + finished adding Tasks. The default is noaction. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + :param on_task_failure: The action the Batch service should take when any + Task fails in a Job created under this schedule. A Task is considered to + have failed if it has a failureInfo. A failureInfo is set + if the Task completes with a non-zero exit code after exhausting its retry + count, or if there was an error starting the Task, for example due to a + resource file download error. The default is noaction. Possible values + include: 'noAction', 'performExitOptionsJobAction' + :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the Job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration + :param constraints: The execution constraints for Jobs created under this + schedule. + :type constraints: ~azure.batch.models.JobConstraints + :param job_manager_task: The details of a Job Manager Task to be launched + when a Job is started under this schedule. If the Job does not specify a + Job Manager Task, the user must explicitly add Tasks to the Job using the + Task API. 
If the Job does specify a Job Manager Task, the Batch service + creates the Job Manager Task when the Job is created, and will try to + schedule the Job Manager Task before scheduling other Tasks in the Job. + :type job_manager_task: ~azure.batch.models.JobManagerTask + :param job_preparation_task: The Job Preparation Task for Jobs created + under this schedule. If a Job has a Job Preparation Task, the Batch + service will run the Job Preparation Task on a Node before starting any + Tasks of that Job on that Compute Node. + :type job_preparation_task: ~azure.batch.models.JobPreparationTask + :param job_release_task: The Job Release Task for Jobs created under this + schedule. The primary purpose of the Job Release Task is to undo changes + to Nodes made by the Job Preparation Task. Example activities include + deleting local files, or shutting down services that were started as part + of Job preparation. A Job Release Task cannot be specified without also + specifying a Job Preparation Task for the Job. The Batch service runs the + Job Release Task on the Compute Nodes that have run the Job Preparation + Task. + :type job_release_task: ~azure.batch.models.JobReleaseTask + :param common_environment_settings: A list of common environment variable + settings. These environment variables are set for all Tasks in Jobs + created under this schedule (including the Job Manager, Job Preparation + and Job Release Tasks). Individual Tasks can override an environment + setting specified here by specifying the same setting name with a + different value. + :type common_environment_settings: + list[~azure.batch.models.EnvironmentSetting] + :param pool_info: Required. The Pool on which the Batch service runs the + Tasks of Jobs created under this schedule. + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with each Job + created under this schedule as metadata. 
The Batch service does not assign + any meaning to metadata; it is solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, + 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, + 'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'}, + 'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: + super(JobSpecification, self).__init__(**kwargs) + self.priority = priority + self.display_name = display_name + self.uses_task_dependencies = uses_task_dependencies + self.on_all_tasks_complete = on_all_tasks_complete + self.on_task_failure = on_task_failure + self.network_configuration = network_configuration + self.constraints = constraints + self.job_manager_task = job_manager_task + self.job_preparation_task = 
job_preparation_task + self.job_release_task = job_release_task + self.common_environment_settings = common_environment_settings + self.pool_info = pool_info + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/job_statistics.py new file mode 100644 index 00000000..db6d382d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_statistics.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a Job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all Tasks + in the Job. 
The wall clock time is the elapsed time from when the Task + started running on a Compute Node to when it finished (or to the last time + the statistics were updated, if the Task had not finished by then). If a + Task was retried, this includes the wall clock time of all the Task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in the Job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in the Job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all Tasks in the Job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all Tasks in the Job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed in the Job during the given time range. A Task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks in the Job + that failed during the given time range. A Task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + Tasks in the Job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in the Job. + The wait time for a Task is defined as the elapsed time between the + creation of the Task and the start of Task execution. (If the Task is + retried due to failures, the wait time is the time to the most recent Task + execution.) This value is only reported in the Account lifetime + statistics; it is not included in the Job statistics. 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(JobStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + 
self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.num_succeeded_tasks = kwargs.get('num_succeeded_tasks', None) + self.num_failed_tasks = kwargs.get('num_failed_tasks', None) + self.num_task_retries = kwargs.get('num_task_retries', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_statistics_py3.py new file mode 100644 index 00000000..c12c785f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_statistics_py3.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobStatistics(Model): + """Resource usage statistics for a Job. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. 
The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by all Tasks in the Job. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of all Tasks + in the Job. The wall clock time is the elapsed time from when the Task + started running on a Compute Node to when it finished (or to the last time + the statistics were updated, if the Task had not finished by then). If a + Task was retried, this includes the wall clock time of all the Task + retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by all Tasks in the Job. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by all Tasks in the Job. + :type write_iops: long + :param read_io_gi_b: Required. The total amount of data in GiB read from + disk by all Tasks in the Job. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total amount of data in GiB written to + disk by all Tasks in the Job. + :type write_io_gi_b: float + :param num_succeeded_tasks: Required. The total number of Tasks + successfully completed in the Job during the given time range. A Task + completes successfully if it returns exit code 0. + :type num_succeeded_tasks: long + :param num_failed_tasks: Required. The total number of Tasks in the Job + that failed during the given time range. A Task fails if it exhausts its + maximum retry count without returning exit code 0. + :type num_failed_tasks: long + :param num_task_retries: Required. The total number of retries on all the + Tasks in the Job during the given time range. + :type num_task_retries: long + :param wait_time: Required. The total wait time of all Tasks in the Job. + The wait time for a Task is defined as the elapsed time between the + creation of the Task and the start of Task execution. 
(If the Task is + retried due to failures, the wait time is the time to the most recent Task + execution.) This value is only reported in the Account lifetime + statistics; it is not included in the Job statistics. + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'num_succeeded_tasks': {'required': True}, + 'num_failed_tasks': {'required': True}, + 'num_task_retries': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'num_succeeded_tasks': {'key': 'numSucceededTasks', 'type': 'long'}, + 'num_failed_tasks': {'key': 'numFailedTasks', 'type': 'long'}, + 'num_task_retries': {'key': 'numTaskRetries', 'type': 'long'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, write_iops: int, read_io_gi_b: float, write_io_gi_b: float, num_succeeded_tasks: int, num_failed_tasks: int, num_task_retries: int, wait_time, **kwargs) -> None: + super(JobStatistics, 
self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.num_succeeded_tasks = num_succeeded_tasks + self.num_failed_tasks = num_failed_tasks + self.num_task_retries = num_task_retries + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options.py new file mode 100644 index 00000000..b858c404 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options_py3.py new file mode 100644 index 00000000..77173bcc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter.py new file mode 100644 index 00000000..ac909e55 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a Job. 
+ + :param terminate_reason: The text you want to appear as the Job's + TerminateReason. The default is 'UserTerminate'. + :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = kwargs.get('terminate_reason', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter_py3.py new file mode 100644 index 00000000..d468786e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_terminate_parameter_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobTerminateParameter(Model): + """Options when terminating a Job. + + :param terminate_reason: The text you want to appear as the Job's + TerminateReason. The default is 'UserTerminate'. 
+ :type terminate_reason: str + """ + + _attribute_map = { + 'terminate_reason': {'key': 'terminateReason', 'type': 'str'}, + } + + def __init__(self, *, terminate_reason: str=None, **kwargs) -> None: + super(JobTerminateParameter, self).__init__(**kwargs) + self.terminate_reason = terminate_reason diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_update_options.py b/azext/generated/sdk/batch/v2019_08_01/models/job_update_options.py new file mode 100644 index 00000000..a11f18ab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_update_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_update_options_py3.py new file mode 100644 index 00000000..61a47c21 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(JobUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter.py new file mode 100644 index 00000000..582dba40 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a Job. 
+ + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the Job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. You may change the Pool for a Job only when the Job is + disabled. The Update Job call will fail if you include the poolInfo + element and the Job is not disabled. If you specify an + autoPoolSpecification in the poolInfo, only the keepAlive property of the + autoPoolSpecification can be updated, and then only if the + autoPoolSpecification has a poolLifetimeOption of Job (other job + properties can be updated as normal). + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a Job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + Job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. 
If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, **kwargs): + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.constraints = kwargs.get('constraints', None) + self.pool_info = kwargs.get('pool_info', None) + self.metadata = kwargs.get('metadata', None) + self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter_py3.py new file mode 100644 index 00000000..8318d6fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/job_update_parameter_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobUpdateParameter(Model): + """The set of changes to be made to a Job. 
+ + All required parameters must be populated in order to send to Azure. + + :param priority: The priority of the Job. Priority values can range from + -1000 to 1000, with -1000 being the lowest priority and 1000 being the + highest priority. If omitted, it is set to the default value 0. + :type priority: int + :param constraints: The execution constraints for the Job. If omitted, the + constraints are cleared. + :type constraints: ~azure.batch.models.JobConstraints + :param pool_info: Required. The Pool on which the Batch service runs the + Job's Tasks. You may change the Pool for a Job only when the Job is + disabled. The Update Job call will fail if you include the poolInfo + element and the Job is not disabled. If you specify an + autoPoolSpecification in the poolInfo, only the keepAlive property of the + autoPoolSpecification can be updated, and then only if the + autoPoolSpecification has a poolLifetimeOption of Job (other job + properties can be updated as normal). + :type pool_info: ~azure.batch.models.PoolInformation + :param metadata: A list of name-value pairs associated with the Job as + metadata. If omitted, it takes the default value of an empty list; in + effect, any existing metadata is deleted. + :type metadata: list[~azure.batch.models.MetadataItem] + :param on_all_tasks_complete: The action the Batch service should take + when all Tasks in the Job are in the completed state. If omitted, the + completion behavior is set to noaction. If the current value is + terminatejob, this is an error because a Job's completion behavior may not + be changed from terminatejob to noaction. You may not change the value + from terminatejob to noaction - that is, once you have engaged automatic + Job termination, you cannot turn it off again. If you try to do this, the + request fails and Batch returns status code 400 (Bad Request) and an + 'invalid property value' error response. 
If you do not specify this + element in a PUT request, it is equivalent to passing noaction. This is an + error if the current value is terminatejob. Possible values include: + 'noAction', 'terminateJob' + :type on_all_tasks_complete: str or ~azure.batch.models.OnAllTasksComplete + """ + + _validation = { + 'pool_info': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, + 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, + } + + def __init__(self, *, pool_info, priority: int=None, constraints=None, metadata=None, on_all_tasks_complete=None, **kwargs) -> None: + super(JobUpdateParameter, self).__init__(**kwargs) + self.priority = priority + self.constraints = constraints + self.pool_info = pool_info + self.metadata = metadata + self.on_all_tasks_complete = on_all_tasks_complete diff --git a/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration.py new file mode 100644 index 00000000..25f2768f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user Account on a Linux Compute Node. + + :param uid: The user ID of the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user Account. The + private key must not be password protected. The private key is used to + automatically configure asymmetric-key based authentication for SSH + between Compute Nodes in a Linux Pool when the Pool's + enableInterNodeCommunication property is true (it is ignored if + enableInterNodeCommunication is false). It does this by placing the key + pair into the user's .ssh directory. If not specified, password-less SSH + is not configured between Compute Nodes (no modification of the user's + .ssh directory is done). 
+ :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = kwargs.get('uid', None) + self.gid = kwargs.get('gid', None) + self.ssh_private_key = kwargs.get('ssh_private_key', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration_py3.py new file mode 100644 index 00000000..5025ea07 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/linux_user_configuration_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LinuxUserConfiguration(Model): + """Properties used to create a user Account on a Linux Compute Node. + + :param uid: The user ID of the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the uid. + :type uid: int + :param gid: The group ID for the user Account. The uid and gid properties + must be specified together or not at all. If not specified the underlying + operating system picks the gid. + :type gid: int + :param ssh_private_key: The SSH private key for the user Account. The + private key must not be password protected. 
The private key is used to + automatically configure asymmetric-key based authentication for SSH + between Compute Nodes in a Linux Pool when the Pool's + enableInterNodeCommunication property is true (it is ignored if + enableInterNodeCommunication is false). It does this by placing the key + pair into the user's .ssh directory. If not specified, password-less SSH + is not configured between Compute Nodes (no modification of the user's + .ssh directory is done). + :type ssh_private_key: str + """ + + _attribute_map = { + 'uid': {'key': 'uid', 'type': 'int'}, + 'gid': {'key': 'gid', 'type': 'int'}, + 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, + } + + def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = uid + self.gid = gid + self.ssh_private_key = ssh_private_key diff --git a/azext/generated/sdk/batch/v2019_08_01/models/metadata_item.py b/azext/generated/sdk/batch/v2019_08_01/models/metadata_item.py new file mode 100644 index 00000000..d1d203e8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/metadata_item.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. 
+ + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(MetadataItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/metadata_item_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/metadata_item_py3.py new file mode 100644 index 00000000..3d127cd1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/metadata_item_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MetadataItem(Model): + """A name-value pair associated with a Batch service resource. + + The Batch service does not assign any meaning to this metadata; it is + solely for the use of user code. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. + :type name: str + :param value: Required. The value of the metadata item. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str, value: str, **kwargs) -> None: + super(MetadataItem, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration.py new file mode 100644 index 00000000..8cbe7dc8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MountConfiguration(Model): + """The file system to mount on each node. + + :param azure_blob_file_system_configuration: The Azure Storage Container + to mount using blob FUSE on each node. This property is mutually exclusive + with all other properties. + :type azure_blob_file_system_configuration: + ~azure.batch.models.AzureBlobFileSystemConfiguration + :param nfs_mount_configuration: The NFS file system to mount on each node. + This property is mutually exclusive with all other properties. + :type nfs_mount_configuration: ~azure.batch.models.NFSMountConfiguration + :param cifs_mount_configuration: The CIFS/SMB file system to mount on each + node. This property is mutually exclusive with all other properties. 
+ :type cifs_mount_configuration: ~azure.batch.models.CIFSMountConfiguration + :param azure_file_share_configuration: The Azure File Share to mount on + each node. This property is mutually exclusive with all other properties. + :type azure_file_share_configuration: + ~azure.batch.models.AzureFileShareConfiguration + """ + + _attribute_map = { + 'azure_blob_file_system_configuration': {'key': 'azureBlobFileSystemConfiguration', 'type': 'AzureBlobFileSystemConfiguration'}, + 'nfs_mount_configuration': {'key': 'nfsMountConfiguration', 'type': 'NFSMountConfiguration'}, + 'cifs_mount_configuration': {'key': 'cifsMountConfiguration', 'type': 'CIFSMountConfiguration'}, + 'azure_file_share_configuration': {'key': 'azureFileShareConfiguration', 'type': 'AzureFileShareConfiguration'}, + } + + def __init__(self, **kwargs): + super(MountConfiguration, self).__init__(**kwargs) + self.azure_blob_file_system_configuration = kwargs.get('azure_blob_file_system_configuration', None) + self.nfs_mount_configuration = kwargs.get('nfs_mount_configuration', None) + self.cifs_mount_configuration = kwargs.get('cifs_mount_configuration', None) + self.azure_file_share_configuration = kwargs.get('azure_file_share_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration_py3.py new file mode 100644 index 00000000..3f670fc6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/mount_configuration_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MountConfiguration(Model): + """The file system to mount on each node. + + :param azure_blob_file_system_configuration: The Azure Storage Container + to mount using blob FUSE on each node. This property is mutually exclusive + with all other properties. + :type azure_blob_file_system_configuration: + ~azure.batch.models.AzureBlobFileSystemConfiguration + :param nfs_mount_configuration: The NFS file system to mount on each node. + This property is mutually exclusive with all other properties. + :type nfs_mount_configuration: ~azure.batch.models.NFSMountConfiguration + :param cifs_mount_configuration: The CIFS/SMB file system to mount on each + node. This property is mutually exclusive with all other properties. + :type cifs_mount_configuration: ~azure.batch.models.CIFSMountConfiguration + :param azure_file_share_configuration: The Azure File Share to mount on + each node. This property is mutually exclusive with all other properties. 
+ :type azure_file_share_configuration: + ~azure.batch.models.AzureFileShareConfiguration + """ + + _attribute_map = { + 'azure_blob_file_system_configuration': {'key': 'azureBlobFileSystemConfiguration', 'type': 'AzureBlobFileSystemConfiguration'}, + 'nfs_mount_configuration': {'key': 'nfsMountConfiguration', 'type': 'NFSMountConfiguration'}, + 'cifs_mount_configuration': {'key': 'cifsMountConfiguration', 'type': 'CIFSMountConfiguration'}, + 'azure_file_share_configuration': {'key': 'azureFileShareConfiguration', 'type': 'AzureFileShareConfiguration'}, + } + + def __init__(self, *, azure_blob_file_system_configuration=None, nfs_mount_configuration=None, cifs_mount_configuration=None, azure_file_share_configuration=None, **kwargs) -> None: + super(MountConfiguration, self).__init__(**kwargs) + self.azure_blob_file_system_configuration = azure_blob_file_system_configuration + self.nfs_mount_configuration = nfs_mount_configuration + self.cifs_mount_configuration = cifs_mount_configuration + self.azure_file_share_configuration = azure_file_share_configuration diff --git a/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings.py b/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings.py new file mode 100644 index 00000000..d88a46e1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance Task. + + Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance Task fails. The + multi-instance Task is then terminated and retried, up to its retry limit. + + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of Compute Nodes required by the + Task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the Compute Nodes to enable them to coordinate when the primary runs the + main Task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and Task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas Task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the Task working + directory, but instead are downloaded to the Task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, **kwargs): + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = kwargs.get('number_of_instances', None) + self.coordination_command_line = kwargs.get('coordination_command_line', None) + self.common_resource_files = kwargs.get('common_resource_files', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings_py3.py new file mode 100644 index 00000000..347a46ea --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/multi_instance_settings_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MultiInstanceSettings(Model): + """Settings which specify how to run a multi-instance Task. + + Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI + case, if any of the subtasks fail (for example due to exiting with a + non-zero exit code) the entire multi-instance Task fails. The + multi-instance Task is then terminated and retried, up to its retry limit. 
+ + All required parameters must be populated in order to send to Azure. + + :param number_of_instances: The number of Compute Nodes required by the + Task. If omitted, the default is 1. + :type number_of_instances: int + :param coordination_command_line: Required. The command line to run on all + the Compute Nodes to enable them to coordinate when the primary runs the + main Task command. A typical coordination command line launches a + background service and verifies that the service is ready to process + inter-node messages. + :type coordination_command_line: str + :param common_resource_files: A list of files that the Batch service will + download before running the coordination command line. The difference + between common resource files and Task resource files is that common + resource files are downloaded for all subtasks including the primary, + whereas Task resource files are downloaded only for the primary. Also note + that these resource files are not downloaded to the Task working + directory, but instead are downloaded to the Task root directory (one + directory above the working directory). There is a maximum size for the + list of resource files. When the max size is exceeded, the request will + fail and the response error code will be RequestEntityTooLarge. If this + occurs, the collection of ResourceFiles must be reduced in size. This can + be achieved using .zip files, Application Packages, or Docker Containers. 
+ :type common_resource_files: list[~azure.batch.models.ResourceFile] + """ + + _validation = { + 'coordination_command_line': {'required': True}, + } + + _attribute_map = { + 'number_of_instances': {'key': 'numberOfInstances', 'type': 'int'}, + 'coordination_command_line': {'key': 'coordinationCommandLine', 'type': 'str'}, + 'common_resource_files': {'key': 'commonResourceFiles', 'type': '[ResourceFile]'}, + } + + def __init__(self, *, coordination_command_line: str, number_of_instances: int=None, common_resource_files=None, **kwargs) -> None: + super(MultiInstanceSettings, self).__init__(**kwargs) + self.number_of_instances = number_of_instances + self.coordination_command_line = coordination_command_line + self.common_resource_files = common_resource_files diff --git a/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair.py b/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair.py new file mode 100644 index 00000000..d2775a33 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NameValuePair, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair_py3.py new file mode 100644 index 00000000..9e508e56 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/name_value_pair_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameValuePair(Model): + """Represents a name-value pair. + + :param name: The name in the name-value pair. + :type name: str + :param value: The value in the name-value pair. 
+ :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, value: str=None, **kwargs) -> None: + super(NameValuePair, self).__init__(**kwargs) + self.name = name + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/network_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/network_configuration.py new file mode 100644 index 00000000..fa0ada16 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/network_configuration.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a Pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the Compute Nodes of the Pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch Account. The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes in the Pool. If the + subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. 
The + specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking + if the specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. For Pools created with virtualMachineConfiguration only ARM + virtual networks ('Microsoft.Network/virtualNetworks') are supported, but + for Pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For Pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For Pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on Compute + Nodes in the Batch Pool. Pool endpoint configuration is only supported on + Pools with the virtualMachineConfiguration property. + :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + :param public_ips: The list of public IPs which the Batch service will use + when provisioning Compute Nodes. 
The number of IPs specified here limits + the maximum size of the Pool - 50 dedicated nodes or 20 low-priority nodes + can be allocated for each public IP. For example, a pool needing 150 + dedicated VMs would need at least 3 public IPs specified. Each element of + this collection is of the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. + :type public_ips: list[str] + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + 'public_ips': {'key': 'publicIPs', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) + self.dynamic_vnet_assignment_scope = kwargs.get('dynamic_vnet_assignment_scope', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) + self.public_ips = kwargs.get('public_ips', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/network_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/network_configuration_py3.py new file mode 100644 index 00000000..eba1333c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/network_configuration_py3.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkConfiguration(Model): + """The network configuration for a Pool. + + :param subnet_id: The ARM resource identifier of the virtual network + subnet which the Compute Nodes of the Pool will join. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch Account. The specified subnet should have enough free IP + addresses to accommodate the number of Compute Nodes in the Pool. If the + subnet doesn't have enough free IP addresses, the Pool will partially + allocate Nodes, and a resize error will occur. The 'MicrosoftAzureBatch' + service principal must have the 'Classic Virtual Machine Contributor' + Role-Based Access Control (RBAC) role for the specified VNet. The + specified subnet must allow communication from the Azure Batch service to + be able to schedule Tasks on the Nodes. This can be verified by checking + if the specified VNet has any associated Network Security Groups (NSG). If + communication to the Nodes in the specified subnet is denied by an NSG, + then the Batch service will set the state of the Compute Nodes to + unusable. For Pools created with virtualMachineConfiguration only ARM + virtual networks ('Microsoft.Network/virtualNetworks') are supported, but + for Pools created with cloudServiceConfiguration both ARM and classic + virtual networks are supported. If the specified VNet has any associated + Network Security Groups (NSG), then a few reserved system ports must be + enabled for inbound communication. For Pools created with a virtual + machine configuration, enable ports 29876 and 29877, as well as port 22 + for Linux and port 3389 for Windows. For Pools created with a cloud + service configuration, enable ports 10100, 20100, and 30100. 
Also enable + outbound connections to Azure Storage on port 443. For more details see: + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration + :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope + :param endpoint_configuration: The configuration for endpoints on Compute + Nodes in the Batch Pool. Pool endpoint configuration is only supported on + Pools with the virtualMachineConfiguration property. + :type endpoint_configuration: + ~azure.batch.models.PoolEndpointConfiguration + :param public_ips: The list of public IPs which the Batch service will use + when provisioning Compute Nodes. The number of IPs specified here limits + the maximum size of the Pool - 50 dedicated nodes or 20 low-priority nodes + can be allocated for each public IP. For example, a pool needing 150 + dedicated VMs would need at least 3 public IPs specified. Each element of + this collection is of the form: + /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. 
+ :type public_ips: list[str] + """ + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, + 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, + 'public_ips': {'key': 'publicIPs', 'type': '[str]'}, + } + + def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, public_ips=None, **kwargs) -> None: + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id + self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope + self.endpoint_configuration = endpoint_configuration + self.public_ips = public_ips diff --git a/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule.py b/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule.py new file mode 100644 index 00000000..fe3fd6a8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + Pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. 
For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + :param source_port_ranges: The source port ranges to match for the rule. + Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), + or a port range (i.e. 100-200). The ports must be in the range of 0 to + 65535. Each entry in this collection must not overlap any other entry + (either a range or an individual port). If any other values are provided + the request fails with HTTP status code 400. The default value is '*'. 
+ :type source_port_ranges: list[str] + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.access = kwargs.get('access', None) + self.source_address_prefix = kwargs.get('source_address_prefix', None) + self.source_port_ranges = kwargs.get('source_port_ranges', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule_py3.py new file mode 100644 index 00000000..bed8fb49 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/network_security_group_rule_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NetworkSecurityGroupRule(Model): + """A network security group rule to apply to an inbound endpoint. + + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + Pool must be unique and are evaluated in order of priority. 
The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. + :type priority: int + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'allow', 'deny' + :type access: str or ~azure.batch.models.NetworkSecurityGroupRuleAccess + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. + :type source_address_prefix: str + :param source_port_ranges: The source port ranges to match for the rule. + Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), + or a port range (i.e. 100-200). The ports must be in the range of 0 to + 65535. Each entry in this collection must not overlap any other entry + (either a range or an individual port). If any other values are provided + the request fails with HTTP status code 400. The default value is '*'. 
+ :type source_port_ranges: list[str] + """ + + _validation = { + 'priority': {'required': True}, + 'access': {'required': True}, + 'source_address_prefix': {'required': True}, + } + + _attribute_map = { + 'priority': {'key': 'priority', 'type': 'int'}, + 'access': {'key': 'access', 'type': 'NetworkSecurityGroupRuleAccess'}, + 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, + 'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'}, + } + + def __init__(self, *, priority: int, access, source_address_prefix: str, source_port_ranges=None, **kwargs) -> None: + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = priority + self.access = access + self.source_address_prefix = source_address_prefix + self.source_port_ranges = source_port_ranges diff --git a/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration.py new file mode 100644 index 00000000..611e457d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NFSMountConfiguration(Model): + """Information used to connect to an NFS file system. + + All required parameters must be populated in order to send to Azure. + + :param source: Required. The URI of the file system to mount. + :type source: str + :param relative_mount_path: Required. 
The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. + :type mount_options: str + """ + + _validation = { + 'source': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'source': {'key': 'source', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NFSMountConfiguration, self).__init__(**kwargs) + self.source = kwargs.get('source', None) + self.relative_mount_path = kwargs.get('relative_mount_path', None) + self.mount_options = kwargs.get('mount_options', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration_py3.py new file mode 100644 index 00000000..9cba9748 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/nfs_mount_configuration_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NFSMountConfiguration(Model): + """Information used to connect to an NFS file system. 
+ + All required parameters must be populated in order to send to Azure. + + :param source: Required. The URI of the file system to mount. + :type source: str + :param relative_mount_path: Required. The relative path on the compute + node where the file system will be mounted. All file systems are mounted + relative to the Batch mounts directory, accessible via the + AZ_BATCH_NODE_MOUNTS_DIR environment variable. + :type relative_mount_path: str + :param mount_options: Additional command line options to pass to the mount + command. These are 'net use' options in Windows and 'mount' options in + Linux. + :type mount_options: str + """ + + _validation = { + 'source': {'required': True}, + 'relative_mount_path': {'required': True}, + } + + _attribute_map = { + 'source': {'key': 'source', 'type': 'str'}, + 'relative_mount_path': {'key': 'relativeMountPath', 'type': 'str'}, + 'mount_options': {'key': 'mountOptions', 'type': 'str'}, + } + + def __init__(self, *, source: str, relative_mount_path: str, mount_options: str=None, **kwargs) -> None: + super(NFSMountConfiguration, self).__init__(**kwargs) + self.source = source + self.relative_mount_path = relative_mount_path + self.mount_options = mount_options diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information.py b/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information.py new file mode 100644 index 00000000..d0ec5f5f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the Compute Node agent. + + The Batch Compute Node agent is a program that runs on each Compute Node in + the Pool and provides Batch capability on the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of the Batch Compute Node agent + running on the Compute Node. This version number can be checked against + the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the Compute Node agent + was updated on the Compute Node. This is the most recent time that the + Compute Node agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.last_update_time = kwargs.get('last_update_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information_py3.py new file mode 100644 index 00000000..2be72956 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_agent_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeAgentInformation(Model): + """Information about the Compute Node agent. + + The Batch Compute Node agent is a program that runs on each Compute Node in + the Pool and provides Batch capability on the Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of the Batch Compute Node agent + running on the Compute Node. This version number can be checked against + the Compute Node agent release notes located at + https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. + :type version: str + :param last_update_time: Required. The time when the Compute Node agent + was updated on the Compute Node. This is the most recent time that the + Compute Node agent was updated to a new version. + :type last_update_time: datetime + """ + + _validation = { + 'version': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, version: str, last_update_time, **kwargs) -> None: + super(NodeAgentInformation, self).__init__(**kwargs) + self.version = version + self.last_update_time = last_update_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_counts.py b/azext/generated/sdk/batch/v2019_08_01/models/node_counts.py new file mode 100644 index 00000000..8cb93d4a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_counts.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of Compute Nodes in each Compute Node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of Compute Nodes in the creating + state. + :type creating: int + :param idle: Required. The number of Compute Nodes in the idle state. + :type idle: int + :param offline: Required. The number of Compute Nodes in the offline + state. + :type offline: int + :param preempted: Required. The number of Compute Nodes in the preempted + state. + :type preempted: int + :param rebooting: Required. The count of Compute Nodes in the rebooting + state. + :type rebooting: int + :param reimaging: Required. The number of Compute Nodes in the reimaging + state. + :type reimaging: int + :param running: Required. The number of Compute Nodes in the running + state. + :type running: int + :param starting: Required. The number of Compute Nodes in the starting + state. + :type starting: int + :param start_task_failed: Required. The number of Compute Nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of Compute Nodes in the + leavingPool state. + :type leaving_pool: int + :param unknown: Required. The number of Compute Nodes in the unknown + state. + :type unknown: int + :param unusable: Required. The number of Compute Nodes in the unusable + state. + :type unusable: int + :param waiting_for_start_task: Required. The number of Compute Nodes in + the waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. 
The total number of Compute Nodes. + :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(NodeCounts, self).__init__(**kwargs) + self.creating = kwargs.get('creating', None) + self.idle = kwargs.get('idle', None) + self.offline = kwargs.get('offline', None) + self.preempted = kwargs.get('preempted', None) + self.rebooting = kwargs.get('rebooting', None) + self.reimaging = kwargs.get('reimaging', None) + self.running = kwargs.get('running', None) + self.starting = kwargs.get('starting', None) + self.start_task_failed = kwargs.get('start_task_failed', None) + self.leaving_pool = kwargs.get('leaving_pool', None) + self.unknown = kwargs.get('unknown', None) + self.unusable = kwargs.get('unusable', 
None) + self.waiting_for_start_task = kwargs.get('waiting_for_start_task', None) + self.total = kwargs.get('total', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_counts_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_counts_py3.py new file mode 100644 index 00000000..5080115b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_counts_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeCounts(Model): + """The number of Compute Nodes in each Compute Node state. + + All required parameters must be populated in order to send to Azure. + + :param creating: Required. The number of Compute Nodes in the creating + state. + :type creating: int + :param idle: Required. The number of Compute Nodes in the idle state. + :type idle: int + :param offline: Required. The number of Compute Nodes in the offline + state. + :type offline: int + :param preempted: Required. The number of Compute Nodes in the preempted + state. + :type preempted: int + :param rebooting: Required. The count of Compute Nodes in the rebooting + state. + :type rebooting: int + :param reimaging: Required. The number of Compute Nodes in the reimaging + state. + :type reimaging: int + :param running: Required. The number of Compute Nodes in the running + state. + :type running: int + :param starting: Required. The number of Compute Nodes in the starting + state. + :type starting: int + :param start_task_failed: Required. 
The number of Compute Nodes in the + startTaskFailed state. + :type start_task_failed: int + :param leaving_pool: Required. The number of Compute Nodes in the + leavingPool state. + :type leaving_pool: int + :param unknown: Required. The number of Compute Nodes in the unknown + state. + :type unknown: int + :param unusable: Required. The number of Compute Nodes in the unusable + state. + :type unusable: int + :param waiting_for_start_task: Required. The number of Compute Nodes in + the waitingForStartTask state. + :type waiting_for_start_task: int + :param total: Required. The total number of Compute Nodes. + :type total: int + """ + + _validation = { + 'creating': {'required': True}, + 'idle': {'required': True}, + 'offline': {'required': True}, + 'preempted': {'required': True}, + 'rebooting': {'required': True}, + 'reimaging': {'required': True}, + 'running': {'required': True}, + 'starting': {'required': True}, + 'start_task_failed': {'required': True}, + 'leaving_pool': {'required': True}, + 'unknown': {'required': True}, + 'unusable': {'required': True}, + 'waiting_for_start_task': {'required': True}, + 'total': {'required': True}, + } + + _attribute_map = { + 'creating': {'key': 'creating', 'type': 'int'}, + 'idle': {'key': 'idle', 'type': 'int'}, + 'offline': {'key': 'offline', 'type': 'int'}, + 'preempted': {'key': 'preempted', 'type': 'int'}, + 'rebooting': {'key': 'rebooting', 'type': 'int'}, + 'reimaging': {'key': 'reimaging', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'starting': {'key': 'starting', 'type': 'int'}, + 'start_task_failed': {'key': 'startTaskFailed', 'type': 'int'}, + 'leaving_pool': {'key': 'leavingPool', 'type': 'int'}, + 'unknown': {'key': 'unknown', 'type': 'int'}, + 'unusable': {'key': 'unusable', 'type': 'int'}, + 'waiting_for_start_task': {'key': 'waitingForStartTask', 'type': 'int'}, + 'total': {'key': 'total', 'type': 'int'}, + } + + def __init__(self, *, creating: int, idle: int, offline: int, preempted: 
int, rebooting: int, reimaging: int, running: int, starting: int, start_task_failed: int, leaving_pool: int, unknown: int, unusable: int, waiting_for_start_task: int, total: int, **kwargs) -> None: + super(NodeCounts, self).__init__(**kwargs) + self.creating = creating + self.idle = idle + self.offline = offline + self.preempted = preempted + self.rebooting = rebooting + self.reimaging = reimaging + self.running = running + self.starting = starting + self.start_task_failed = start_task_failed + self.leaving_pool = leaving_pool + self.unknown = unknown + self.unusable = unusable + self.waiting_for_start_task = waiting_for_start_task + self.total = total diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter.py new file mode 100644 index 00000000..1104c06c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a Compute Node. + + :param node_disable_scheduling_option: What to do with currently running + Tasks when disabling Task scheduling on the Compute Node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, **kwargs): + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = kwargs.get('node_disable_scheduling_option', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter_py3.py new file mode 100644 index 00000000..2e621e48 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_disable_scheduling_parameter_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDisableSchedulingParameter(Model): + """Options for disabling scheduling on a Compute Node. + + :param node_disable_scheduling_option: What to do with currently running + Tasks when disabling Task scheduling on the Compute Node. The default + value is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + """ + + _attribute_map = { + 'node_disable_scheduling_option': {'key': 'nodeDisableSchedulingOption', 'type': 'DisableComputeNodeSchedulingOption'}, + } + + def __init__(self, *, node_disable_scheduling_option=None, **kwargs) -> None: + super(NodeDisableSchedulingParameter, self).__init__(**kwargs) + self.node_disable_scheduling_option = node_disable_scheduling_option diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_file.py b/azext/generated/sdk/batch/v2019_08_01/models/node_file.py new file mode 100644 index 00000000..79706922 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_file.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a Compute Node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties. 
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, **kwargs): + super(NodeFile, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.url = kwargs.get('url', None) + self.is_directory = kwargs.get('is_directory', None) + self.properties = kwargs.get('properties', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_file_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/node_file_paged.py new file mode 100644 index 00000000..4463c944 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_file_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class NodeFilePaged(Paged): + """ + A paging container for iterating over a list of :class:`NodeFile <azure.batch.models.NodeFile>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[NodeFile]'} + } + + def __init__(self, *args, **kwargs): + + super(NodeFilePaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_file_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_file_py3.py new file mode 100644 index 00000000..c3608299 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_file_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeFile(Model): + """Information about a file or directory on a Compute Node. + + :param name: The file path. + :type name: str + :param url: The URL of the file. + :type url: str + :param is_directory: Whether the object represents a directory. + :type is_directory: bool + :param properties: The file properties.
+ :type properties: ~azure.batch.models.FileProperties + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'properties': {'key': 'properties', 'type': 'FileProperties'}, + } + + def __init__(self, *, name: str=None, url: str=None, is_directory: bool=None, properties=None, **kwargs) -> None: + super(NodeFile, self).__init__(**kwargs) + self.name = name + self.url = url + self.is_directory = is_directory + self.properties = properties diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter.py new file mode 100644 index 00000000..f9fd7842 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a Compute Node. + + :param node_reboot_option: When to reboot the Compute Node and what to do + with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, **kwargs): + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = kwargs.get('node_reboot_option', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter_py3.py new file mode 100644 index 00000000..b2d5d1dc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_reboot_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRebootParameter(Model): + """Options for rebooting a Compute Node. + + :param node_reboot_option: When to reboot the Compute Node and what to do + with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + """ + + _attribute_map = { + 'node_reboot_option': {'key': 'nodeRebootOption', 'type': 'ComputeNodeRebootOption'}, + } + + def __init__(self, *, node_reboot_option=None, **kwargs) -> None: + super(NodeRebootParameter, self).__init__(**kwargs) + self.node_reboot_option = node_reboot_option diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter.py new file mode 100644 index 00000000..7cb55458 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a Compute Node. + + :param node_reimage_option: When to reimage the Compute Node and what to + do with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, **kwargs): + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = kwargs.get('node_reimage_option', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter_py3.py new file mode 100644 index 00000000..11aad8cf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_reimage_parameter_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeReimageParameter(Model): + """Options for reimaging a Compute Node. + + :param node_reimage_option: When to reimage the Compute Node and what to + do with currently running Tasks. The default value is requeue. 
Possible + values include: 'requeue', 'terminate', 'taskCompletion', 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + """ + + _attribute_map = { + 'node_reimage_option': {'key': 'nodeReimageOption', 'type': 'ComputeNodeReimageOption'}, + } + + def __init__(self, *, node_reimage_option=None, **kwargs) -> None: + super(NodeReimageParameter, self).__init__(**kwargs) + self.node_reimage_option = node_reimage_option diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter.py new file mode 100644 index 00000000..9082b65c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing Compute Nodes from a Pool. + + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the Compute Nodes + to be removed from the specified Pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of Compute Nodes to the + Pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). 
+ :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) after it has been selected for deallocation. The + default value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = kwargs.get('node_list', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter_py3.py new file mode 100644 index 00000000..5969c5e4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_remove_parameter_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeRemoveParameter(Model): + """Options for removing Compute Nodes from a Pool. 
+ + All required parameters must be populated in order to send to Azure. + + :param node_list: Required. A list containing the IDs of the Compute Nodes + to be removed from the specified Pool. + :type node_list: list[str] + :param resize_timeout: The timeout for removal of Compute Nodes to the + Pool. The default value is 15 minutes. The minimum value is 5 minutes. If + you specify a value less than 5 minutes, the Batch service returns an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) after it has been selected for deallocation. The + default value is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _validation = { + 'node_list': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'node_list': {'key': 'nodeList', 'type': '[str]'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, node_list, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(NodeRemoveParameter, self).__init__(**kwargs) + self.node_list = node_list + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter.py new file mode 100644 index 00000000..c9e90ab6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user Account on a Compute Node. + + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. If omitted, any existing + password is removed. + :type password: str + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + If omitted, any existing SSH public key is removed. 
+ :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = kwargs.get('password', None) + self.expiry_time = kwargs.get('expiry_time', None) + self.ssh_public_key = kwargs.get('ssh_public_key', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter_py3.py new file mode 100644 index 00000000..9d538529 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/node_update_user_parameter_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpdateUserParameter(Model): + """The set of changes to be made to a user Account on a Compute Node. + + :param password: The password of the Account. The password is required for + Windows Compute Nodes (those created with 'cloudServiceConfiguration', or + created with 'virtualMachineConfiguration' using a Windows Image + reference). For Linux Compute Nodes, the password can optionally be + specified along with the sshPublicKey property. If omitted, any existing + password is removed. + :type password: str + :param expiry_time: The time at which the Account should expire. If + omitted, the default is 1 day from the current time. 
For Linux Compute + Nodes, the expiryTime has a precision up to a day. + :type expiry_time: datetime + :param ssh_public_key: The SSH public key that can be used for remote + login to the Compute Node. The public key should be compatible with + OpenSSH encoding and should be base 64 encoded. This property can be + specified only for Linux Compute Nodes. If this is specified for a Windows + Compute Node, then the Batch service rejects the request; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + If omitted, any existing SSH public key is removed. + :type ssh_public_key: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'expiry_time': {'key': 'expiryTime', 'type': 'iso-8601'}, + 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, expiry_time=None, ssh_public_key: str=None, **kwargs) -> None: + super(NodeUpdateUserParameter, self).__init__(**kwargs) + self.password = password + self.expiry_time = expiry_time + self.ssh_public_key = ssh_public_key diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file.py new file mode 100644 index 00000000..ca0d324e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch Compute Node to + another location after the Batch service has finished executing the Task + process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the Task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the Task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, **kwargs): + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = kwargs.get('file_pattern', None) + self.destination = kwargs.get('destination', None) + self.upload_options = kwargs.get('upload_options', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination.py new file mode 100644 index 00000000..ee86a589 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. 
contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. + :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = kwargs.get('path', None) + self.container_url = kwargs.get('container_url', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination_py3.py new file mode 100644 index 00000000..3f0c9ce0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_blob_container_destination_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileBlobContainerDestination(Model): + """Specifies a file upload destination within an Azure blob storage container. + + All required parameters must be populated in order to send to Azure. + + :param path: The destination blob or virtual directory within the Azure + Storage container. If filePattern refers to a specific file (i.e. contains + no wildcards), then path is the name of the blob to which to upload that + file. If filePattern contains one or more wildcards (and therefore may + match multiple files), then path is the name of the blob virtual directory + (which is prepended to each blob name) to which to upload the file(s). If + omitted, file(s) are uploaded to the root of the container with a blob + name matching their file name. + :type path: str + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the file(s). The URL must include a Shared + Access Signature (SAS) granting write permissions to the container. 
+ :type container_url: str + """ + + _validation = { + 'container_url': {'required': True}, + } + + _attribute_map = { + 'path': {'key': 'path', 'type': 'str'}, + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + } + + def __init__(self, *, container_url: str, path: str=None, **kwargs) -> None: + super(OutputFileBlobContainerDestination, self).__init__(**kwargs) + self.path = path + self.container_url = container_url diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination.py new file mode 100644 index 00000000..1033743c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, **kwargs): + super(OutputFileDestination, self).__init__(**kwargs) + self.container = kwargs.get('container', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination_py3.py new file mode 100644 index 00000000..e7c652b6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_destination_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileDestination(Model): + """The destination to which a file should be uploaded. + + :param container: A location in Azure blob storage to which files are + uploaded. 
+ :type container: ~azure.batch.models.OutputFileBlobContainerDestination + """ + + _attribute_map = { + 'container': {'key': 'container', 'type': 'OutputFileBlobContainerDestination'}, + } + + def __init__(self, *, container=None, **kwargs) -> None: + super(OutputFileDestination, self).__init__(**kwargs) + self.container = container diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_py3.py new file mode 100644 index 00000000..229e47f8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFile(Model): + """A specification for uploading files from an Azure Batch Compute Node to + another location after the Batch service has finished executing the Task + process. + + All required parameters must be populated in order to send to Azure. + + :param file_pattern: Required. A pattern indicating which file(s) to + upload. Both relative and absolute paths are supported. Relative paths are + relative to the Task working directory. The following wildcards are + supported: * matches 0 or more characters (for example pattern abc* would + match abc or abcdef), ** matches any directory, ? matches any single + character, [abc] matches one character in the brackets, and [a-c] matches + one character in the range. 
Brackets can include a negation to match any + character not specified (for example [!abc] matches any character but a, + b, or c). If a file name starts with "." it is ignored by default but may + be matched by specifying it explicitly (for example *.gif will not match + .a.gif, but .*.gif will). A simple example: **\\*.txt matches any file + that does not start in '.' and ends with .txt in the Task working + directory or any subdirectory. If the filename contains a wildcard + character it can be escaped using brackets (for example abc[*] would match + a file named abc*). Note that both \\ and / are treated as directory + separators on Windows, but only / is on Linux. Environment variables + (%var% on Windows or $var on Linux) are expanded prior to the pattern + being applied. + :type file_pattern: str + :param destination: Required. The destination for the output file(s). + :type destination: ~azure.batch.models.OutputFileDestination + :param upload_options: Required. Additional options for the upload + operation, including under what conditions to perform the upload. 
+ :type upload_options: ~azure.batch.models.OutputFileUploadOptions + """ + + _validation = { + 'file_pattern': {'required': True}, + 'destination': {'required': True}, + 'upload_options': {'required': True}, + } + + _attribute_map = { + 'file_pattern': {'key': 'filePattern', 'type': 'str'}, + 'destination': {'key': 'destination', 'type': 'OutputFileDestination'}, + 'upload_options': {'key': 'uploadOptions', 'type': 'OutputFileUploadOptions'}, + } + + def __init__(self, *, file_pattern: str, destination, upload_options, **kwargs) -> None: + super(OutputFile, self).__init__(**kwargs) + self.file_pattern = file_pattern + self.destination = destination + self.upload_options = upload_options diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options.py new file mode 100644 index 00000000..a5170ff6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the Task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, **kwargs): + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = kwargs.get('upload_condition', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options_py3.py new file mode 100644 index 00000000..ed58bdcc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/output_file_upload_options_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OutputFileUploadOptions(Model): + """Details about an output file upload operation, including under what + conditions to perform the upload. + + All required parameters must be populated in order to send to Azure. + + :param upload_condition: Required. The conditions under which the Task + output file or set of files should be uploaded. The default is + taskcompletion. 
Possible values include: 'taskSuccess', 'taskFailure', + 'taskCompletion' + :type upload_condition: str or + ~azure.batch.models.OutputFileUploadCondition + """ + + _validation = { + 'upload_condition': {'required': True}, + } + + _attribute_map = { + 'upload_condition': {'key': 'uploadCondition', 'type': 'OutputFileUploadCondition'}, + } + + def __init__(self, *, upload_condition, **kwargs) -> None: + super(OutputFileUploadOptions, self).__init__(**kwargs) + self.upload_condition = upload_condition diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options.py new file mode 100644 index 00000000..04d968a8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options_py3.py new file mode 100644 index 00000000..62b3e62b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . 
+ :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter.py new file mode 100644 index 00000000..6157bea0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter.py @@ -0,0 +1,206 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A Pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Pool within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two Pool IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of virtual machines in the Pool. All + virtual machines in a Pool are the same size. For information about + available sizes of virtual machines for Cloud Services Pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. For information about available VM sizes for Pools using Images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. 
This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. 
If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. 
The Task runs when the Compute Node is added to the Pool + or when the Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. 
+ :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param mount_configuration: Mount storage using specified file system for + the entire lifetime of the pool. Mount the storage using Azure fileshare, + NFS, CIFS or Blobfuse based file system. + :type mount_configuration: list[~azure.batch.models.MountConfiguration] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 
'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, + } + + def __init__(self, **kwargs): + super(PoolAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + 
self.application_licenses = kwargs.get('application_licenses', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.mount_configuration = kwargs.get('mount_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter_py3.py new file mode 100644 index 00000000..3aa6a052 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_add_parameter_py3.py @@ -0,0 +1,206 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolAddParameter(Model): + """A Pool in the Azure Batch service to add. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Pool within the + Account. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two Pool IDs within an Account that differ only by case). + :type id: str + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. 
The size of virtual machines in the Pool. All + virtual machines in a Pool are the same size. For information about + available sizes of virtual machines for Cloud Services Pools (pools + created with cloudServiceConfiguration), see Sizes for Cloud Services + (https://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and + A2V2. For information about available VM sizes for Pools using Images from + the Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property and virtualMachineConfiguration are mutually + exclusive and one of the properties must be specified. This property + cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property and cloudServiceConfiguration are mutually + exclusive and one of the properties must be specified. + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. 
If you specify a value less than 5 minutes, + the Batch service returns an error; if you are calling the REST API + directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + property is required and the Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: A formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. For more information about specifying this formula, see + 'Automatically scale Compute Nodes in an Azure Batch Pool' + (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. 
The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task specified to run on each Compute Node as it + joins the Pool. The Task runs when the Compute Node is added to the Pool + or when the Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: The list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. 
Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. + :type application_licenses: list[str] + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param mount_configuration: Mount storage using specified file system for + the entire lifetime of the pool. Mount the storage using Azure fileshare, + NFS, CIFS or Blobfuse based file system. 
+ :type mount_configuration: list[~azure.batch.models.MountConfiguration] + """ + + _validation = { + 'id': {'required': True}, + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, + } + + def __init__(self, *, id: str, vm_size: str, display_name: str=None, 
cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, mount_configuration=None, **kwargs) -> None: + super(PoolAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata + self.mount_configuration = mount_configuration diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options.py new file mode 100644 index 00000000..622241dc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options.py @@ -0,0 
+1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. 
+ :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options_py3.py new file mode 100644 index 00000000..7ca41443 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. 
The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options.py new file mode 100644 index 00000000..96b0bc7c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options_py3.py new file mode 100644 index 00000000..4a069bd0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_disable_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolDisableAutoScaleOptions(Model): + """Additional parameters for disable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolDisableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options.py new file mode 100644 index 00000000..dd77582f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options_py3.py new file mode 100644 index 00000000..507bd702 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleOptions(Model): + """Additional parameters for enable_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolEnableAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter.py new file mode 100644 index 00000000..bf94fb46 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a Pool. + + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. The formula is checked for validity before it is + applied to the Pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale Compute Nodes in an + Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. 
+ :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter_py3.py new file mode 100644 index 00000000..1e55da37 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_enable_auto_scale_parameter_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEnableAutoScaleParameter(Model): + """Options for enabling automatic scaling on a Pool. + + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. The formula is checked for validity before it is + applied to the Pool. If the formula is not valid, the Batch service + rejects the request with detailed error information. For more information + about specifying this formula, see Automatically scale Compute Nodes in an + Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
+ :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). If you specify a new interval, + then the existing autoscale evaluation schedule will be stopped and a new + autoscale evaluation schedule will be started, with its starting time + being the time when this request was issued. + :type auto_scale_evaluation_interval: timedelta + """ + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + } + + def __init__(self, *, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, **kwargs) -> None: + super(PoolEnableAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration.py new file mode 100644 index 00000000..0f0db164 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT Pools that can + be used to address specific ports on an individual Compute Node + externally. The maximum number of inbound NAT Pools per Batch Pool is 5. + If the maximum number of inbound NAT Pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, **kwargs): + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration_py3.py new file mode 100644 index 00000000..90e7238b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_endpoint_configuration_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT Pools that can + be used to address specific ports on an individual Compute Node + externally. The maximum number of inbound NAT Pools per Batch Pool is 5. + If the maximum number of inbound NAT Pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.batch.models.InboundNATPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'}, + } + + def __init__(self, *, inbound_nat_pools, **kwargs) -> None: + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = inbound_nat_pools diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options.py new file mode 100644 index 00000000..5fbb7ad3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options_py3.py new file mode 100644 index 00000000..a2f09b9d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleOptions(Model): + """Additional parameters for evaluate_auto_scale operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolEvaluateAutoScaleOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter.py new file mode 100644 index 00000000..123ada7b --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a Pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to the + Pool, 'Enable automatic scaling on a Pool'. 
For more information about + specifying this formula, see Automatically scale Compute Nodes in an Azure + Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter_py3.py new file mode 100644 index 00000000..ced34d08 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_evaluate_auto_scale_parameter_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolEvaluateAutoScaleParameter(Model): + """Options for evaluating an automatic scaling formula on a Pool. + + All required parameters must be populated in order to send to Azure. + + :param auto_scale_formula: Required. The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to the + Pool, 'Enable automatic scaling on a Pool'. 
For more information about + specifying this formula, see Automatically scale Compute Nodes in an Azure + Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + """ + + _validation = { + 'auto_scale_formula': {'required': True}, + } + + _attribute_map = { + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + } + + def __init__(self, *, auto_scale_formula: str, **kwargs) -> None: + super(PoolEvaluateAutoScaleParameter, self).__init__(**kwargs) + self.auto_scale_formula = auto_scale_formula diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options.py new file mode 100644 index 00000000..feffd1c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. 
Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options_py3.py new file mode 100644 index 00000000..de152edb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_exists_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolExistsOptions(Model): + """Additional parameters for exists operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolExistsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options.py new file mode 100644 index 00000000..dbbbcf45 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options_py3.py new file mode 100644 index 00000000..0fc18020 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_all_lifetime_statistics_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetAllLifetimeStatisticsOptions(Model): + """Additional parameters for get_all_lifetime_statistics operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolGetAllLifetimeStatisticsOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options.py new file mode 100644 index 00000000..a629c21e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options_py3.py new file mode 100644 index 00000000..c0b04bd5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_information.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_information.py new file mode 100644 index 00000000..5543b8da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_information.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a Job should be assigned to a Pool. + + :param pool_id: The ID of an existing Pool. All the Tasks of the Job will + run on the specified Pool. You must ensure that the Pool referenced by + this property exists. If the Pool does not exist at the time the Batch + service tries to schedule a Job, no Tasks for the Job will run until you + create a Pool with that id. Note that the Batch service will not reject + the Job request; it will simply not run Tasks until the Pool exists. You + must specify either the Pool ID or the auto Pool specification, but not + both. + :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto Pool when the Job is + submitted. If auto Pool creation fails, the Batch service moves the Job to + a completed state, and the Pool creation error is set in the Job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto Pool. + Any user actions that affect the lifetime of the auto Pool while the Job + is active will result in unexpected behavior. You must specify either the + Pool ID or the auto Pool specification, but not both. 
+ :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, **kwargs): + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.auto_pool_specification = kwargs.get('auto_pool_specification', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_information_py3.py new file mode 100644 index 00000000..6074ea76 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_information_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolInformation(Model): + """Specifies how a Job should be assigned to a Pool. + + :param pool_id: The ID of an existing Pool. All the Tasks of the Job will + run on the specified Pool. You must ensure that the Pool referenced by + this property exists. If the Pool does not exist at the time the Batch + service tries to schedule a Job, no Tasks for the Job will run until you + create a Pool with that id. Note that the Batch service will not reject + the Job request; it will simply not run Tasks until the Pool exists. You + must specify either the Pool ID or the auto Pool specification, but not + both. 
+ :type pool_id: str + :param auto_pool_specification: Characteristics for a temporary 'auto + pool'. The Batch service will create this auto Pool when the Job is + submitted. If auto Pool creation fails, the Batch service moves the Job to + a completed state, and the Pool creation error is set in the Job's + scheduling error property. The Batch service manages the lifetime (both + creation and, unless keepAlive is specified, deletion) of the auto Pool. + Any user actions that affect the lifetime of the auto Pool while the Job + is active will result in unexpected behavior. You must specify either the + Pool ID or the auto Pool specification, but not both. + :type auto_pool_specification: ~azure.batch.models.AutoPoolSpecification + """ + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'auto_pool_specification': {'key': 'autoPoolSpecification', 'type': 'AutoPoolSpecification'}, + } + + def __init__(self, *, pool_id: str=None, auto_pool_specification=None, **kwargs) -> None: + super(PoolInformation, self).__init__(**kwargs) + self.pool_id = pool_id + self.auto_pool_specification = auto_pool_specification diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options.py new file mode 100644 index 00000000..add4b622 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options_py3.py new file mode 100644 index 00000000..d27ea2a6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Pools can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options.py new file mode 100644 index 00000000..5b52f71a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. + + :param start_time: The earliest time from which to include metrics. 
This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.filter = kwargs.get('filter', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options_py3.py new file mode 100644 index 00000000..2141cfa5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_list_usage_metrics_options_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolListUsageMetricsOptions(Model): + """Additional parameters for list_usage_metrics operation. 
+ + :param start_time: The earliest time from which to include metrics. This + must be at least two and a half hours before the current time. If not + specified this defaults to the start time of the last aggregation interval + currently available. + :type start_time: datetime + :param end_time: The latest time from which to include metrics. This must + be at least two hours before the current time. If not specified this + defaults to the end time of the last aggregation interval currently + available. + :type end_time: datetime + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + :type filter: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 results will be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'start_time': {'key': '', 'type': 'iso-8601'}, + 'end_time': {'key': '', 'type': 'iso-8601'}, + 'filter': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, start_time=None, end_time=None, filter: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolListUsageMetricsOptions, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.filter = filter + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts.py new file mode 100644 index 00000000..cb2374fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of Compute Nodes in each state for a Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param pool_id: Required. The ID of the Pool. + :type pool_id: str + :param dedicated: The number of dedicated Compute Nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority Compute Nodes in each + state. + :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, **kwargs): + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.dedicated = kwargs.get('dedicated', None) + self.low_priority = kwargs.get('low_priority', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_paged.py new file mode 100644 index 00000000..67159e5d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolNodeCountsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolNodeCounts <azure.batch.models.PoolNodeCounts>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolNodeCounts]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolNodeCountsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_py3.py new file mode 100644 index 00000000..169ff57c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_node_counts_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolNodeCounts(Model): + """The number of Compute Nodes in each state for a Pool. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool. + :type pool_id: str + :param dedicated: The number of dedicated Compute Nodes in each state. + :type dedicated: ~azure.batch.models.NodeCounts + :param low_priority: The number of low priority Compute Nodes in each + state. 
+ :type low_priority: ~azure.batch.models.NodeCounts + """ + + _validation = { + 'pool_id': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'dedicated': {'key': 'dedicated', 'type': 'NodeCounts'}, + 'low_priority': {'key': 'lowPriority', 'type': 'NodeCounts'}, + } + + def __init__(self, *, pool_id: str, dedicated=None, low_priority=None, **kwargs) -> None: + super(PoolNodeCounts, self).__init__(**kwargs) + self.pool_id = pool_id + self.dedicated = dedicated + self.low_priority = low_priority diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options.py new file mode 100644 index 00000000..82b54aef --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options_py3.py new file mode 100644 index 00000000..ff9f10f0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchOptions(Model): + """Additional parameters for patch operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolPatchOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter.py new file mode 100644 index 00000000..110c6db7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a Pool. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing StartTask. If omitted, any existing StartTask is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. If this element is present, it replaces any + existing Certificate references configured on the Pool. If omitted, any + existing Certificate references are left unchanged. For Windows Nodes, the + Batch service installs the Certificates to the specified Certificate store + and location. For Linux Compute Nodes, the Certificates are stored in a + directory inside the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of Packages to be installed + on each Compute Node in the Pool. Changes to Package references affect all + new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. If this element + is present, it replaces any existing Package references. If you specify an + empty collection, then all Package references are removed from the Pool. + If omitted, any existing Package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the Pool. If you specify an empty collection, any metadata + is removed from the Pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter_py3.py new file mode 100644 index 00000000..1d579776 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_patch_parameter_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolPatchParameter(Model): + """The set of changes to be made to a Pool. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing StartTask. If omitted, any existing StartTask is left unchanged. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. If this element is present, it replaces any + existing Certificate references configured on the Pool. If omitted, any + existing Certificate references are left unchanged. For Windows Nodes, the + Batch service installs the Certificates to the specified Certificate store + and location. For Linux Compute Nodes, the Certificates are stored in a + directory inside the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: A list of Packages to be installed + on each Compute Node in the Pool. Changes to Package references affect all + new Nodes joining the Pool, but do not affect Compute Nodes that are + already in the Pool until they are rebooted or reimaged. If this element + is present, it replaces any existing Package references. If you specify an + empty collection, then all Package references are removed from the Pool. + If omitted, any existing Package references are left unchanged. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. 
If this element is present, it replaces any existing metadata + configured on the Pool. If you specify an empty collection, any metadata + is removed from the Pool. If omitted, any existing metadata is left + unchanged. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, start_task=None, certificate_references=None, application_package_references=None, metadata=None, **kwargs) -> None: + super(PoolPatchParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options.py new file mode 100644 index 00000000..14be8ddd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options_py3.py new file mode 100644 index 00000000..1fe5eb97 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_remove_nodes_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolRemoveNodesOptions(Model): + """Additional parameters for remove_nodes operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolRemoveNodesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options.py new file mode 100644 index 00000000..e83a7ccc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options_py3.py new file mode 100644 index 00000000..ef457e81 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeOptions(Model): + """Additional parameters for resize operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter.py new file mode 100644 index 00000000..9d4a258d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a Pool. 
+ + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of Nodes to the Pool or + removal of Compute Nodes from the Pool. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) if the Pool size is decreasing. The default value + is requeue. Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, **kwargs): + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter_py3.py new file mode 100644 index 00000000..ebacfd8f --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/pool_resize_parameter_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolResizeParameter(Model): + """Options for changing the size of a Pool. + + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of Nodes to the Pool or + removal of Compute Nodes from the Pool. The default value is 15 minutes. + The minimum value is 5 minutes. If you specify a value less than 5 + minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a Compute Node + and its running task(s) if the Pool size is decreasing. The default value + is requeue. 
Possible values include: 'requeue', 'terminate', + 'taskCompletion', 'retainedData' + :type node_deallocation_option: str or + ~azure.batch.models.ComputeNodeDeallocationOption + """ + + _attribute_map = { + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + } + + def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, **kwargs) -> None: + super(PoolResizeParameter, self).__init__(**kwargs) + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_specification.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_specification.py new file mode 100644 index 00000000..7fdb2aab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_specification.py @@ -0,0 +1,194 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the Pool. + All virtual machines in a Pool are the same size. For information about + available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property must be specified if the Pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property must be specified if the Pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. 
+ :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true.
The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. 
For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. The permitted licenses available on the Pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the Pool. + :type application_licenses: list[str] + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param mount_configuration: A list of file systems to mount on each node + in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. 
+ :type mount_configuration: list[~azure.batch.models.MountConfiguration] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, + } + + def __init__(self, **kwargs): + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + 
self.vm_size = kwargs.get('vm_size', None) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.enable_auto_scale = kwargs.get('enable_auto_scale', None) + self.auto_scale_formula = kwargs.get('auto_scale_formula', None) + self.auto_scale_evaluation_interval = kwargs.get('auto_scale_evaluation_interval', None) + self.enable_inter_node_communication = kwargs.get('enable_inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.mount_configuration = kwargs.get('mount_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_specification_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_specification_py3.py new file mode 100644 index 00000000..b86666a9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_specification_py3.py @@ -0,0 +1,194 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolSpecification(Model): + """Specification for creating a new Pool. + + All required parameters must be populated in order to send to Azure. + + :param display_name: The display name for the Pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param vm_size: Required. The size of the virtual machines in the Pool. + All virtual machines in a Pool are the same size. For information about + available sizes of virtual machines in Pools, see Choose a VM size for + Compute Nodes in an Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param cloud_service_configuration: The cloud service configuration for + the Pool. This property must be specified if the Pool needs to be created + with Azure PaaS VMs. This property and virtualMachineConfiguration are + mutually exclusive and one of the properties must be specified. If neither + is specified then the Batch service returns an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). This + property cannot be specified if the Batch Account was created with its + poolAllocationMode property set to 'UserSubscription'. + :type cloud_service_configuration: + ~azure.batch.models.CloudServiceConfiguration + :param virtual_machine_configuration: The virtual machine configuration + for the Pool. This property must be specified if the Pool needs to be + created with Azure IaaS VMs. This property and cloudServiceConfiguration + are mutually exclusive and one of the properties must be specified. 
If + neither is specified then the Batch service returns an error; if you are + calling the REST API directly, the HTTP status code is 400 (Bad Request). + :type virtual_machine_configuration: + ~azure.batch.models.VirtualMachineConfiguration + :param max_tasks_per_node: The maximum number of Tasks that can run + concurrently on a single Compute Node in the Pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the Pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. + :type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy + :param resize_timeout: The timeout for allocation of Compute Nodes to the + Pool. This timeout applies only to manual scaling; it has no effect when + enableAutoScale is set to true. The default value is 15 minutes. The + minimum value is 5 minutes. If you specify a value less than 5 minutes, + the Batch service rejects the request with an error; if you are calling + the REST API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param target_dedicated_nodes: The desired number of dedicated Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to true. If enableAutoScale is set to false, then you must set + either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + Compute Nodes in the Pool. This property must not be specified if + enableAutoScale is set to true. If enableAutoScale is set to false, then + you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + :type target_low_priority_nodes: int + :param enable_auto_scale: Whether the Pool size should automatically + adjust over time. 
If false, at least one of targetDedicatedNodes and + targetLowPriorityNodes must be specified. If true, the autoScaleFormula + element is required. The Pool automatically resizes according to the + formula. The default value is false. + :type enable_auto_scale: bool + :param auto_scale_formula: The formula for the desired number of Compute + Nodes in the Pool. This property must not be specified if enableAutoScale + is set to false. It is required if enableAutoScale is set to true. The + formula is checked for validity before the Pool is created. If the formula + is not valid, the Batch service rejects the request with detailed error + information. + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. The + default value is 15 minutes. The minimum and maximum value are 5 minutes + and 168 hours respectively. If you specify a value less than 5 minutes or + greater than 168 hours, the Batch service rejects the request with an + invalid property value error; if you are calling the REST API directly, + the HTTP status code is 400 (Bad Request). + :type auto_scale_evaluation_interval: timedelta + :param enable_inter_node_communication: Whether the Pool permits direct + communication between Compute Nodes. Enabling inter-node communication + limits the maximum size of the Pool due to deployment restrictions on the + Compute Nodes of the Pool. This may result in the Pool not reaching its + desired size. The default value is false. + :type enable_inter_node_communication: bool + :param network_configuration: The network configuration for the Pool. + :type network_configuration: ~azure.batch.models.NetworkConfiguration + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: A list of Certificates to be installed on + each Compute Node in the Pool. For Windows Nodes, the Batch service + installs the Certificates to the specified Certificate store and location. + For Linux Compute Nodes, the Certificates are stored in a directory inside + the Task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this + location. For Certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and Certificates are placed in that directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: The list of Packages to be + installed on each Compute Node in the Pool. Changes to Package references + affect all new Nodes joining the Pool, but do not affect Compute Nodes + that are already in the Pool until they are rebooted or reimaged. There is + a maximum of 10 Package references on any given Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each Compute Node in the Pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + Pool creation will fail. The permitted licenses available on the Pool are + 'maya', 'vray', '3dsmax', 'arnold'. An additional charge applies for each + application license added to the Pool. + :type application_licenses: list[str] + :param user_accounts: The list of user Accounts to be created on each + Compute Node in the Pool. + :type user_accounts: list[~azure.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the Pool as + metadata. 
The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.batch.models.MetadataItem] + :param mount_configuration: A list of file systems to mount on each node + in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + :type mount_configuration: list[~azure.batch.models.MountConfiguration] + """ + + _validation = { + 'vm_size': {'required': True}, + } + + _attribute_map = { + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'}, + 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, + 'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'}, + 'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'}, + 'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'}, + 'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'}, + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'}, + 'user_accounts': {'key': 'userAccounts', 'type': 
'[UserAccount]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + 'mount_configuration': {'key': 'mountConfiguration', 'type': '[MountConfiguration]'}, + } + + def __init__(self, *, vm_size: str, display_name: str=None, cloud_service_configuration=None, virtual_machine_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, enable_auto_scale: bool=None, auto_scale_formula: str=None, auto_scale_evaluation_interval=None, enable_inter_node_communication: bool=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, user_accounts=None, metadata=None, mount_configuration=None, **kwargs) -> None: + super(PoolSpecification, self).__init__(**kwargs) + self.display_name = display_name + self.vm_size = vm_size + self.cloud_service_configuration = cloud_service_configuration + self.virtual_machine_configuration = virtual_machine_configuration + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.resize_timeout = resize_timeout + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.enable_auto_scale = enable_auto_scale + self.auto_scale_formula = auto_scale_formula + self.auto_scale_evaluation_interval = auto_scale_evaluation_interval + self.enable_inter_node_communication = enable_inter_node_communication + self.network_configuration = network_configuration + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.application_licenses = application_licenses + self.user_accounts = user_accounts + self.metadata = metadata + self.mount_configuration = mount_configuration diff --git 
a/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics.py new file mode 100644 index 00000000..2b2eb837 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + Pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to Pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + Compute Nodes in the Pool. 
+ :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, **kwargs): + super(PoolStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.usage_stats = kwargs.get('usage_stats', None) + self.resource_stats = kwargs.get('resource_stats', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics_py3.py new file mode 100644 index 00000000..582b013f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_statistics_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStatistics(Model): + """Contains utilization and resource usage statistics for the lifetime of a + Pool. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL for the statistics. 
+ :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param usage_stats: Statistics related to Pool usage, such as the amount + of core-time used. + :type usage_stats: ~azure.batch.models.UsageStatistics + :param resource_stats: Statistics related to resource consumption by + Compute Nodes in the Pool. + :type resource_stats: ~azure.batch.models.ResourceStatistics + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'usage_stats': {'key': 'usageStats', 'type': 'UsageStatistics'}, + 'resource_stats': {'key': 'resourceStats', 'type': 'ResourceStatistics'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, usage_stats=None, resource_stats=None, **kwargs) -> None: + super(PoolStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.usage_stats = usage_stats + self.resource_stats = resource_stats diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options.py new file mode 100644 index 00000000..ab8fec73 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. 
+ :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options_py3.py new file mode 100644 index 00000000..d5cc404e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_stop_resize_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolStopResizeOptions(Model): + """Additional parameters for stop_resize operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(PoolStopResizeOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options.py new file mode 100644 index 00000000..ca7f97cb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options_py3.py new file mode 100644 index 00000000..edf5065c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesOptions(Model): + """Additional parameters for update_properties operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(PoolUpdatePropertiesOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter.py new file mode 100644 index 00000000..4924a413 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing StartTask. If omitted, any existing StartTask is removed from the + Pool. 
+ :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of Certificates to be + installed on each Compute Node in the Pool. This list replaces any + existing Certificate references configured on the Pool. If you specify an + empty collection, any existing Certificate references are removed from the + Pool. For Windows Nodes, the Batch service installs the Certificates to + the specified Certificate store and location. For Linux Compute Nodes, the + Certificates are stored in a directory inside the Task working directory + and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the + Task to query for this location. For Certificates with visibility of + 'remoteUser', a 'certs' directory is created in the user's home directory + (e.g., /home/{user-name}/certs) and Certificates are placed in that + directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of Application + Packages to be installed on each Compute Node in the Pool. The list + replaces any existing Application Package references on the Pool. Changes + to Application Package references affect all new Compute Nodes joining the + Pool, but do not affect Compute Nodes that are already in the Pool until + they are rebooted or reimaged. There is a maximum of 10 Application + Package references on any given Pool. If omitted, or if you specify an + empty collection, any existing Application Packages references are removed + from the Pool. A maximum of 10 references may be specified on a given + Pool. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + Pool as metadata. This list replaces any existing metadata configured on + the Pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the Pool. 
+ :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, **kwargs): + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = kwargs.get('start_task', None) + self.certificate_references = kwargs.get('certificate_references', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter_py3.py new file mode 100644 index 00000000..18d1f193 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_update_properties_parameter_py3.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUpdatePropertiesParameter(Model): + """The set of changes to be made to a Pool. + + All required parameters must be populated in order to send to Azure. 
+ + :param start_task: A Task to run on each Compute Node as it joins the + Pool. The Task runs when the Compute Node is added to the Pool or when the + Compute Node is restarted. If this element is present, it overwrites any + existing StartTask. If omitted, any existing StartTask is removed from the + Pool. + :type start_task: ~azure.batch.models.StartTask + :param certificate_references: Required. A list of Certificates to be + installed on each Compute Node in the Pool. This list replaces any + existing Certificate references configured on the Pool. If you specify an + empty collection, any existing Certificate references are removed from the + Pool. For Windows Nodes, the Batch service installs the Certificates to + the specified Certificate store and location. For Linux Compute Nodes, the + Certificates are stored in a directory inside the Task working directory + and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the + Task to query for this location. For Certificates with visibility of + 'remoteUser', a 'certs' directory is created in the user's home directory + (e.g., /home/{user-name}/certs) and Certificates are placed in that + directory. + :type certificate_references: + list[~azure.batch.models.CertificateReference] + :param application_package_references: Required. The list of Application + Packages to be installed on each Compute Node in the Pool. The list + replaces any existing Application Package references on the Pool. Changes + to Application Package references affect all new Compute Nodes joining the + Pool, but do not affect Compute Nodes that are already in the Pool until + they are rebooted or reimaged. There is a maximum of 10 Application + Package references on any given Pool. If omitted, or if you specify an + empty collection, any existing Application Packages references are removed + from the Pool. A maximum of 10 references may be specified on a given + Pool. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param metadata: Required. A list of name-value pairs associated with the + Pool as metadata. This list replaces any existing metadata configured on + the Pool. If omitted, or if you specify an empty collection, any existing + metadata is removed from the Pool. + :type metadata: list[~azure.batch.models.MetadataItem] + """ + + _validation = { + 'certificate_references': {'required': True}, + 'application_package_references': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'start_task': {'key': 'startTask', 'type': 'StartTask'}, + 'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, + } + + def __init__(self, *, certificate_references, application_package_references, metadata, start_task=None, **kwargs) -> None: + super(PoolUpdatePropertiesParameter, self).__init__(**kwargs) + self.start_task = start_task + self.certificate_references = certificate_references + self.application_package_references = application_package_references + self.metadata = metadata diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics.py new file mode 100644 index 00000000..c8ae169e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a Pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the Pool. All + VMs in a Pool are the same size. For information about available sizes of + virtual machines in Pools, see Choose a VM size for Compute Nodes in an + Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the Pool + during this aggregation interval. 
+ :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = kwargs.get('pool_id', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.vm_size = kwargs.get('vm_size', None) + self.total_core_hours = kwargs.get('total_core_hours', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_paged.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_paged.py new file mode 100644 index 00000000..891554f1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_paged.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.paging import Paged + + +class PoolUsageMetricsPaged(Paged): + """ + A paging container for iterating over a list of :class:`PoolUsageMetrics <azure.batch.models.PoolUsageMetrics>` object + """ + + _attribute_map = { + 'next_link': {'key': 'odata\\.nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[PoolUsageMetrics]'} + } + + def __init__(self, *args, **kwargs): + + super(PoolUsageMetricsPaged, self).__init__(*args, **kwargs) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_py3.py new file mode 100644 index 00000000..c0842745 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/pool_usage_metrics_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PoolUsageMetrics(Model): + """Usage metrics for a Pool across an aggregation interval. + + All required parameters must be populated in order to send to Azure. + + :param pool_id: Required. The ID of the Pool whose metrics are aggregated + in this entry. + :type pool_id: str + :param start_time: Required. The start time of the aggregation interval + covered by this entry. + :type start_time: datetime + :param end_time: Required. The end time of the aggregation interval + covered by this entry. + :type end_time: datetime + :param vm_size: Required. The size of virtual machines in the Pool. All + VMs in a Pool are the same size. 
For information about available sizes of + virtual machines in Pools, see Choose a VM size for Compute Nodes in an + Azure Batch Pool + (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + :type vm_size: str + :param total_core_hours: Required. The total core hours used in the Pool + during this aggregation interval. + :type total_core_hours: float + """ + + _validation = { + 'pool_id': {'required': True}, + 'start_time': {'required': True}, + 'end_time': {'required': True}, + 'vm_size': {'required': True}, + 'total_core_hours': {'required': True}, + } + + _attribute_map = { + 'pool_id': {'key': 'poolId', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'vmSize', 'type': 'str'}, + 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, + } + + def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None: + super(PoolUsageMetrics, self).__init__(**kwargs) + self.pool_id = pool_id + self.start_time = start_time + self.end_time = end_time + self.vm_size = vm_size + self.total_core_hours = total_core_hours diff --git a/azext/generated/sdk/batch/v2019_08_01/models/recent_job.py b/azext/generated/sdk/batch/v2019_08_01/models/recent_job.py new file mode 100644 index 00000000..9aacfff0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/recent_job.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent Job to run under the Job Schedule. + + :param id: The ID of the Job. + :type id: str + :param url: The URL of the Job. + :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(RecentJob, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/recent_job_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/recent_job_py3.py new file mode 100644 index 00000000..95286729 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/recent_job_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RecentJob(Model): + """Information about the most recent Job to run under the Job Schedule. + + :param id: The ID of the Job. + :type id: str + :param url: The URL of the Job. 
+ :type url: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, url: str=None, **kwargs) -> None: + super(RecentJob, self).__init__(**kwargs) + self.id = id + self.url = url diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resize_error.py b/azext/generated/sdk/batch/v2019_08_01/models/resize_error.py new file mode 100644 index 00000000..ba9061f6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resize_error.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a Pool. + + :param code: An identifier for the Pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the Pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(ResizeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.values = kwargs.get('values', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resize_error_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/resize_error_py3.py new file mode 100644 index 00000000..1f7295df --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resize_error_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResizeError(Model): + """An error that occurred when resizing a Pool. + + :param code: An identifier for the Pool resize error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: A message describing the Pool resize error, intended to be + suitable for display in a user interface. + :type message: str + :param values: A list of additional error details related to the Pool + resize error. 
+ :type values: list[~azure.batch.models.NameValuePair] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, code: str=None, message: str=None, values=None, **kwargs) -> None: + super(ResizeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.values = values diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resource_file.py b/azext/generated/sdk/batch/v2019_08_01/models/resource_file.py new file mode 100644 index 00000000..bb221d36 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resource_file.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a Compute Node. + + :param auto_storage_container_name: The storage container name in the auto + storage Account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. 
This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the Compute Node to which to download + the file(s), relative to the Task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. 
In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the Task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux Compute Nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows Compute Node. If this property is not specified + for a Linux Compute Node, then a default value of 0770 is applied to the + file. + :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) + self.storage_container_url = kwargs.get('storage_container_url', None) + self.http_url = kwargs.get('http_url', None) + self.blob_prefix = kwargs.get('blob_prefix', None) + self.file_path = kwargs.get('file_path', None) + self.file_mode = kwargs.get('file_mode', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resource_file_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/resource_file_py3.py new file mode 100644 index 00000000..29d3d7b1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resource_file_py3.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a Compute Node. + + :param auto_storage_container_name: The storage container name in the auto + storage Account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read and list permissions on the container, or set the ACL for + the container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. 
There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the Compute Node to which to download + the file(s), relative to the Task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the Task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux Compute Nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows Compute Node. If this property is not specified + for a Linux Compute Node, then a default value of 0770 is applied to the + file. 
+ :type file_mode: str + """ + + _attribute_map = { + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, + } + + def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None: + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = auto_storage_container_name + self.storage_container_url = storage_container_url + self.http_url = http_url + self.blob_prefix = blob_prefix + self.file_path = file_path + self.file_mode = file_mode diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics.py new file mode 100644 index 00000000..7861657e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. 
The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + Compute Nodes in the Pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all Compute Nodes in the Pool. + :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + Compute Nodes in the Pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all Compute Nodes in the Pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all Compute Nodes in the Pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all Compute Nodes in the Pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all Compute Nodes in the Pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all Compute Nodes in the Pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all Compute Nodes in the Pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all Compute Nodes in the Pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all Compute Nodes in the Pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.avg_cpu_percentage = kwargs.get('avg_cpu_percentage', None) + self.avg_memory_gi_b = kwargs.get('avg_memory_gi_b', None) + self.peak_memory_gi_b = kwargs.get('peak_memory_gi_b', None) + self.avg_disk_gi_b = kwargs.get('avg_disk_gi_b', None) + self.peak_disk_gi_b = kwargs.get('peak_disk_gi_b', None) + self.disk_read_iops = 
kwargs.get('disk_read_iops', None) + self.disk_write_iops = kwargs.get('disk_write_iops', None) + self.disk_read_gi_b = kwargs.get('disk_read_gi_b', None) + self.disk_write_gi_b = kwargs.get('disk_write_gi_b', None) + self.network_read_gi_b = kwargs.get('network_read_gi_b', None) + self.network_write_gi_b = kwargs.get('network_write_gi_b', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics_py3.py new file mode 100644 index 00000000..85289d9c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/resource_statistics_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResourceStatistics(Model): + """Statistics related to resource consumption by Compute Nodes in a Pool. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param avg_cpu_percentage: Required. The average CPU usage across all + Compute Nodes in the Pool (percentage per node). + :type avg_cpu_percentage: float + :param avg_memory_gi_b: Required. The average memory usage in GiB across + all Compute Nodes in the Pool. 
+ :type avg_memory_gi_b: float + :param peak_memory_gi_b: Required. The peak memory usage in GiB across all + Compute Nodes in the Pool. + :type peak_memory_gi_b: float + :param avg_disk_gi_b: Required. The average used disk space in GiB across + all Compute Nodes in the Pool. + :type avg_disk_gi_b: float + :param peak_disk_gi_b: Required. The peak used disk space in GiB across + all Compute Nodes in the Pool. + :type peak_disk_gi_b: float + :param disk_read_iops: Required. The total number of disk read operations + across all Compute Nodes in the Pool. + :type disk_read_iops: long + :param disk_write_iops: Required. The total number of disk write + operations across all Compute Nodes in the Pool. + :type disk_write_iops: long + :param disk_read_gi_b: Required. The total amount of data in GiB of disk + reads across all Compute Nodes in the Pool. + :type disk_read_gi_b: float + :param disk_write_gi_b: Required. The total amount of data in GiB of disk + writes across all Compute Nodes in the Pool. + :type disk_write_gi_b: float + :param network_read_gi_b: Required. The total amount of data in GiB of + network reads across all Compute Nodes in the Pool. + :type network_read_gi_b: float + :param network_write_gi_b: Required. The total amount of data in GiB of + network writes across all Compute Nodes in the Pool. 
+ :type network_write_gi_b: float + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'avg_cpu_percentage': {'required': True}, + 'avg_memory_gi_b': {'required': True}, + 'peak_memory_gi_b': {'required': True}, + 'avg_disk_gi_b': {'required': True}, + 'peak_disk_gi_b': {'required': True}, + 'disk_read_iops': {'required': True}, + 'disk_write_iops': {'required': True}, + 'disk_read_gi_b': {'required': True}, + 'disk_write_gi_b': {'required': True}, + 'network_read_gi_b': {'required': True}, + 'network_write_gi_b': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'avg_cpu_percentage': {'key': 'avgCPUPercentage', 'type': 'float'}, + 'avg_memory_gi_b': {'key': 'avgMemoryGiB', 'type': 'float'}, + 'peak_memory_gi_b': {'key': 'peakMemoryGiB', 'type': 'float'}, + 'avg_disk_gi_b': {'key': 'avgDiskGiB', 'type': 'float'}, + 'peak_disk_gi_b': {'key': 'peakDiskGiB', 'type': 'float'}, + 'disk_read_iops': {'key': 'diskReadIOps', 'type': 'long'}, + 'disk_write_iops': {'key': 'diskWriteIOps', 'type': 'long'}, + 'disk_read_gi_b': {'key': 'diskReadGiB', 'type': 'float'}, + 'disk_write_gi_b': {'key': 'diskWriteGiB', 'type': 'float'}, + 'network_read_gi_b': {'key': 'networkReadGiB', 'type': 'float'}, + 'network_write_gi_b': {'key': 'networkWriteGiB', 'type': 'float'}, + } + + def __init__(self, *, start_time, last_update_time, avg_cpu_percentage: float, avg_memory_gi_b: float, peak_memory_gi_b: float, avg_disk_gi_b: float, peak_disk_gi_b: float, disk_read_iops: int, disk_write_iops: int, disk_read_gi_b: float, disk_write_gi_b: float, network_read_gi_b: float, network_write_gi_b: float, **kwargs) -> None: + super(ResourceStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.avg_cpu_percentage = avg_cpu_percentage + self.avg_memory_gi_b = 
avg_memory_gi_b + self.peak_memory_gi_b = peak_memory_gi_b + self.avg_disk_gi_b = avg_disk_gi_b + self.peak_disk_gi_b = peak_disk_gi_b + self.disk_read_iops = disk_read_iops + self.disk_write_iops = disk_write_iops + self.disk_read_gi_b = disk_read_gi_b + self.disk_write_gi_b = disk_write_gi_b + self.network_read_gi_b = network_read_gi_b + self.network_write_gi_b = network_write_gi_b diff --git a/azext/generated/sdk/batch/v2019_08_01/models/schedule.py b/azext/generated/sdk/batch/v2019_08_01/models/schedule.py new file mode 100644 index 00000000..6faff7c4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/schedule.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which Jobs will be created. + + :param do_not_run_until: The earliest time at which any Job may be created + under this Job Schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create Jobs immediately. + :type do_not_run_until: datetime + :param do_not_run_after: A time after which no Job will be created under + this Job Schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active Job under this Job + Schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring Job Schedule, the Job Schedule will remain active until you + explicitly terminate it. 
+ :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a Job should be created, within which a Job must be + created. If a Job is not created within the startWindow interval, then the + 'opportunity' is lost; no Job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the Job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive Jobs under the Job Schedule. A Job Schedule can have at + most one active Job under it at any given time. Because a Job Schedule can + have at most one active Job under it at any given time, if it is time to + create a new Job under a Job Schedule, but the previous Job is still + running, the Batch service will not create the new Job until the previous + Job finishes. If the previous Job does not finish within the startWindow + period of the new recurrenceInterval, then no new Job will be scheduled + for that interval. For recurring Jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when Jobs are created, add + Tasks to the Jobs and terminate the Jobs ready for the next recurrence. + The default is that the schedule does not recur: one Job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that Job finishes. The minimum value is 1 minute. 
If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = kwargs.get('do_not_run_until', None) + self.do_not_run_after = kwargs.get('do_not_run_after', None) + self.start_window = kwargs.get('start_window', None) + self.recurrence_interval = kwargs.get('recurrence_interval', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/schedule_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/schedule_py3.py new file mode 100644 index 00000000..34189c09 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/schedule_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Schedule(Model): + """The schedule according to which Jobs will be created. + + :param do_not_run_until: The earliest time at which any Job may be created + under this Job Schedule. If you do not specify a doNotRunUntil time, the + schedule becomes ready to create Jobs immediately. 
+ :type do_not_run_until: datetime + :param do_not_run_after: A time after which no Job will be created under + this Job Schedule. The schedule will move to the completed state as soon + as this deadline is past and there is no active Job under this Job + Schedule. If you do not specify a doNotRunAfter time, and you are creating + a recurring Job Schedule, the Job Schedule will remain active until you + explicitly terminate it. + :type do_not_run_after: datetime + :param start_window: The time interval, starting from the time at which + the schedule indicates a Job should be created, within which a Job must be + created. If a Job is not created within the startWindow interval, then the + 'opportunity' is lost; no Job will be created until the next recurrence of + the schedule. If the schedule is recurring, and the startWindow is longer + than the recurrence interval, then this is equivalent to an infinite + startWindow, because the Job that is 'due' in one recurrenceInterval is + not carried forward into the next recurrence interval. The default is + infinite. The minimum value is 1 minute. If you specify a lower value, the + Batch service rejects the schedule with an error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). + :type start_window: timedelta + :param recurrence_interval: The time interval between the start times of + two successive Jobs under the Job Schedule. A Job Schedule can have at + most one active Job under it at any given time. Because a Job Schedule can + have at most one active Job under it at any given time, if it is time to + create a new Job under a Job Schedule, but the previous Job is still + running, the Batch service will not create the new Job until the previous + Job finishes. If the previous Job does not finish within the startWindow + period of the new recurrenceInterval, then no new Job will be scheduled + for that interval. 
For recurring Jobs, you should normally specify a + jobManagerTask in the jobSpecification. If you do not use jobManagerTask, + you will need an external process to monitor when Jobs are created, add + Tasks to the Jobs and terminate the Jobs ready for the next recurrence. + The default is that the schedule does not recur: one Job is created, + within the startWindow after the doNotRunUntil time, and the schedule is + complete as soon as that Job finishes. The minimum value is 1 minute. If + you specify a lower value, the Batch service rejects the schedule with an + error; if you are calling the REST API directly, the HTTP status code is + 400 (Bad Request). + :type recurrence_interval: timedelta + """ + + _attribute_map = { + 'do_not_run_until': {'key': 'doNotRunUntil', 'type': 'iso-8601'}, + 'do_not_run_after': {'key': 'doNotRunAfter', 'type': 'iso-8601'}, + 'start_window': {'key': 'startWindow', 'type': 'duration'}, + 'recurrence_interval': {'key': 'recurrenceInterval', 'type': 'duration'}, + } + + def __init__(self, *, do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None, **kwargs) -> None: + super(Schedule, self).__init__(**kwargs) + self.do_not_run_until = do_not_run_until + self.do_not_run_after = do_not_run_after + self.start_window = start_window + self.recurrence_interval = recurrence_interval diff --git a/azext/generated/sdk/batch/v2019_08_01/models/start_task.py b/azext/generated/sdk/batch/v2019_08_01/models/start_task.py new file mode 100644 index 00000000..613a192f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/start_task.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A Task which is run when a Node joins a Pool in the Azure Batch service, or + when the Compute Node is rebooted or reimaged. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. In some cases the StartTask may be re-run even though the + Compute Node was not rebooted. Special care should be taken to avoid + StartTasks which create breakaway process or install/launch services from + the StartTask working directory, as this will block Batch from being able + to re-run the StartTask. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the StartTask. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + StartTask runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all Task environment variables are + mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the StartTask. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the StartTask runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the Task may be + retried. 
The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the Task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the Task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the Task. If the maximum retry count is + -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + StartTask to complete successfully (that is, to exit with exit code 0) + before scheduling any Tasks on the Compute Node. If true and the StartTask + fails on a Node, the Batch service retries the StartTask up to its maximum + retry count (maxTaskRetryCount). If the Task has still not completed + successfully after all retries, then the Batch service marks the Node + unusable, and will not schedule Tasks to it. This condition can be + detected via the Compute Node state and failure info details. If false, + the Batch service will not wait for the StartTask to complete. In this + case, other Tasks can start executing on the Compute Node while the + StartTask is still running; and even if the StartTask fails, new Tasks + will continue to be scheduled on the Compute Node. The default is true. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(StartTask, self).__init__(**kwargs) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.user_identity = kwargs.get('user_identity', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) + self.wait_for_success = kwargs.get('wait_for_success', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/start_task_information.py b/azext/generated/sdk/batch/v2019_08_01/models/start_task_information.py new file mode 100644 index 00000000..1fbe6890 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/start_task_information.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a StartTask running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the StartTask on the Compute Node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the StartTask started + running. This value is reset every time the Task is restarted or retried + (that is, this is the most recent time at which the StartTask started + running). + :type start_time: datetime + :param end_time: The time at which the StartTask stopped running. This is + the end time of the most recent run of the StartTask, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the StartTask is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the StartTask + command line. This property is set only if the StartTask is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the StartTask (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. 
This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(StartTaskInformation, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/start_task_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/start_task_information_py3.py new file mode 100644 index 00000000..53eebcfd --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/start_task_information_py3.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTaskInformation(Model): + """Information about a StartTask running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param state: Required. The state of the StartTask on the Compute Node. + Possible values include: 'running', 'completed' + :type state: str or ~azure.batch.models.StartTaskState + :param start_time: Required. The time at which the StartTask started + running. This value is reset every time the Task is restarted or retried + (that is, this is the most recent time at which the StartTask started + running). + :type start_time: datetime + :param end_time: The time at which the StartTask stopped running. This is + the end time of the most recent run of the StartTask, if that run has + completed (even if that run failed and a retry is pending). This element + is not present if the StartTask is currently running. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the StartTask + command line. This property is set only if the StartTask is in the + completed state. In general, the exit code for a process reflects the + specific convention implemented by the application developer for that + process. If you use the exit code value to make decisions in your code, be + sure that you know the exit code convention used by the application + process. However, if the Batch service terminates the StartTask (due to + timeout, or user termination via the API) you may see an operating + system-defined exit code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. 
+ Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'state': {'required': True}, + 'start_time': {'required': True}, + 'retry_count': {'required': True}, + } + + _attribute_map = { + 'state': {'key': 'state', 'type': 'StartTaskState'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, state, start_time, retry_count: int, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, result=None, **kwargs) -> None: + super(StartTaskInformation, self).__init__(**kwargs) + self.state = state + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_08_01/models/start_task_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/start_task_py3.py new file mode 100644 index 00000000..3d89f824 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/start_task_py3.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartTask(Model): + """A Task which is run when a Node joins a Pool in the Azure Batch service, or + when the Compute Node is rebooted or reimaged. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. In some cases the StartTask may be re-run even though the + Compute Node was not rebooted. Special care should be taken to avoid + StartTasks which create breakaway process or install/launch services from + the StartTask working directory, as this will block Batch from being able + to re-run the StartTask. + + All required parameters must be populated in order to send to Azure. + + :param command_line: Required. The command line of the StartTask. The + command line does not run under a shell, and therefore cannot take + advantage of shell features such as environment variable expansion. If you + want to take advantage of such features, you should invoke the shell in + the command line, for example using "cmd /c MyCommand" in Windows or + "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, + it should use a relative path (relative to the Task working directory), or + use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + StartTask runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all Task environment variables are + mapped into the container, and the Task command line is executed in the + container. Files produced in the container outside of + AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning + that Batch file APIs will not be able to access those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. There is a + maximum size for the list of resource files. When the max size is + exceeded, the request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. Files listed under this + element are located in the Task's working directory. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the StartTask. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the StartTask runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the Task may be + retried. 
The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the Task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the Task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the Task. If the maximum retry count is + -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + StartTask to complete successfully (that is, to exit with exit code 0) + before scheduling any Tasks on the Compute Node. If true and the StartTask + fails on a Node, the Batch service retries the StartTask up to its maximum + retry count (maxTaskRetryCount). If the Task has still not completed + successfully after all retries, then the Batch service marks the Node + unusable, and will not schedule Tasks to it. This condition can be + detected via the Compute Node state and failure info details. If false, + the Batch service will not wait for the StartTask to complete. In this + case, other Tasks can start executing on the Compute Node while the + StartTask is still running; and even if the StartTask fails, new Tasks + will continue to be scheduled on the Compute Node. The default is true. 
+ :type wait_for_success: bool + """ + + _validation = { + 'command_line': {'required': True}, + } + + _attribute_map = { + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + } + + def __init__(self, *, command_line: str, container_settings=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, **kwargs) -> None: + super(StartTask, self).__init__(**kwargs) + self.command_line = command_line + self.container_settings = container_settings + self.resource_files = resource_files + self.environment_settings = environment_settings + self.user_identity = user_identity + self.max_task_retry_count = max_task_retry_count + self.wait_for_success = wait_for_success diff --git a/azext/generated/sdk/batch/v2019_08_01/models/subtask_information.py b/azext/generated/sdk/batch/v2019_08_01/models/subtask_information.py new file mode 100644 index 00000000..3af9e1d4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/subtask_information.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the Compute Node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. 
Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. + :type previous_state_transition_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(SubtaskInformation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.node_info = kwargs.get('node_info', 
None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.state = kwargs.get('state', None) + self.state_transition_time = kwargs.get('state_transition_time', None) + self.previous_state = kwargs.get('previous_state', None) + self.previous_state_transition_time = kwargs.get('previous_state_transition_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/subtask_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/subtask_information_py3.py new file mode 100644 index 00000000..a62d27fb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/subtask_information_py3.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SubtaskInformation(Model): + """Information about an Azure Batch subtask. + + :param id: The ID of the subtask. + :type id: int + :param node_info: Information about the Compute Node on which the subtask + ran. + :type node_info: ~azure.batch.models.ComputeNodeInformation + :param start_time: The time at which the subtask started running. If the + subtask has been restarted or retried, this is the most recent time at + which the subtask started running. + :type start_time: datetime + :param end_time: The time at which the subtask completed. 
This property is + set only if the subtask is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the subtask + command line. This property is set only if the subtask is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the subtask (due to timeout, or + user termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param state: The current state of the subtask. Possible values include: + 'preparing', 'running', 'completed' + :type state: str or ~azure.batch.models.SubtaskState + :param state_transition_time: The time at which the subtask entered its + current state. + :type state_transition_time: datetime + :param previous_state: The previous state of the subtask. This property is + not set if the subtask is in its initial running state. Possible values + include: 'preparing', 'running', 'completed' + :type previous_state: str or ~azure.batch.models.SubtaskState + :param previous_state_transition_time: The time at which the subtask + entered its previous state. This property is not set if the subtask is in + its initial running state. 
+ :type previous_state_transition_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'int'}, + 'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'state': {'key': 'state', 'type': 'SubtaskState'}, + 'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'}, + 'previous_state': {'key': 'previousState', 'type': 'SubtaskState'}, + 'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, id: int=None, node_info=None, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, result=None, **kwargs) -> None: + super(SubtaskInformation, self).__init__(**kwargs) + self.id = id + self.node_info = node_info + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.state = state + self.state_transition_time = state_transition_time + self.previous_state = previous_state + self.previous_state_transition_time = previous_state_transition_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options.py 
b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options.py new file mode 100644 index 00000000..f0622c9c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options_py3.py new file mode 100644 index 00000000..634f522c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionOptions(Model): + """Additional parameters for add_collection operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddCollectionOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter.py new file mode 100644 index 00000000..be387bb5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch Tasks to add. 
+ + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of Tasks to add. The maximum count + of Tasks is 100. The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each Task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter_py3.py new file mode 100644 index 00000000..ed3330e2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_parameter_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionParameter(Model): + """A collection of Azure Batch Tasks to add. + + All required parameters must be populated in order to send to Azure. + + :param value: Required. The collection of Tasks to add. The maximum count + of Tasks is 100. 
The total serialized size of this collection must be less + than 1MB. If it is greater than 1MB (for example if each Task has 100's of + resource files or environment variables), the request will fail with code + 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + """ + + _validation = { + 'value': {'required': True, 'max_items': 100}, + } + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddParameter]'}, + } + + def __init__(self, *, value, **kwargs) -> None: + super(TaskAddCollectionParameter, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result.py new file mode 100644 index 00000000..caffae23 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of Tasks to a Job. + + :param value: The results of the add Task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, **kwargs): + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = kwargs.get('value', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result_py3.py new file mode 100644 index 00000000..f64f9f53 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_collection_result_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddCollectionResult(Model): + """The result of adding a collection of Tasks to a Job. + + :param value: The results of the add Task collection operation. 
+ :type value: list[~azure.batch.models.TaskAddResult] + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[TaskAddResult]'}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(TaskAddCollectionResult, self).__init__(**kwargs) + self.value = value diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_options.py new file mode 100644 index 00000000..667cc19d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_options.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_options_py3.py new file mode 100644 index 00000000..da9c6a8c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_options_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddOptions(Model): + """Additional parameters for add operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskAddOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter.py new file mode 100644 index 00000000..0bad1f29 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch Task to add. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. 
Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Task within the + Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a Job that differ only by case). + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the Task. For + multi-instance Tasks, the command line is executed as the primary Task, + after the primary Task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). 
+ :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. 
+ :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. + :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. If the Job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. 
Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. + :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. 
+ :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, + 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, **kwargs): + super(TaskAddParameter, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.display_name = kwargs.get('display_name', None) + self.command_line = kwargs.get('command_line', None) + self.container_settings = kwargs.get('container_settings', None) + self.exit_conditions = kwargs.get('exit_conditions', None) + self.resource_files = kwargs.get('resource_files', None) + self.output_files = kwargs.get('output_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.affinity_info = kwargs.get('affinity_info', None) + self.constraints = 
kwargs.get('constraints', None) + self.user_identity = kwargs.get('user_identity', None) + self.multi_instance_settings = kwargs.get('multi_instance_settings', None) + self.depends_on = kwargs.get('depends_on', None) + self.application_package_references = kwargs.get('application_package_references', None) + self.authentication_token_settings = kwargs.get('authentication_token_settings', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter_py3.py new file mode 100644 index 00000000..15121d18 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_parameter_py3.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddParameter(Model): + """An Azure Batch Task to add. + + Batch will retry Tasks when a recovery operation is triggered on a Node. + Examples of recovery operations include (but are not limited to) when an + unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not + counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, + an internal retry due to a recovery operation may occur. Because of this, + all Tasks should be idempotent. This means Tasks need to tolerate being + interrupted and restarted without causing any corruption or duplicate data. + The best practice for long running Tasks is to use some form of + checkpointing. 
+ + All required parameters must be populated in order to send to Azure. + + :param id: Required. A string that uniquely identifies the Task within the + Job. The ID can contain any combination of alphanumeric characters + including hyphens and underscores, and cannot contain more than 64 + characters. The ID is case-preserving and case-insensitive (that is, you + may not have two IDs within a Job that differ only by case). + :type id: str + :param display_name: A display name for the Task. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :param command_line: Required. The command line of the Task. For + multi-instance Tasks, the command line is executed as the primary Task, + after the primary Task and all subtasks have finished executing the + coordination command line. The command line does not run under a shell, + and therefore cannot take advantage of shell features such as environment + variable expansion. If you want to take advantage of such features, you + should invoke the shell in the command line, for example using "cmd /c + MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command + line refers to file paths, it should use a relative path (relative to the + Task working directory), or use the Batch provided environment variable + (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + :type command_line: str + :param container_settings: The settings for the container under which the + Task runs. If the Pool that will run this Task has containerConfiguration + set, this must be set as well. If the Pool that will run this Task doesn't + have containerConfiguration set, this must not be set. 
When this is + specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR + (the root of Azure Batch directories on the node) are mapped into the + container, all Task environment variables are mapped into the container, + and the Task command line is executed in the container. Files produced in + the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to + the host disk, meaning that Batch file APIs will not be able to access + those files. + :type container_settings: ~azure.batch.models.TaskContainerSettings + :param exit_conditions: How the Batch service should respond when the Task + completes. + :type exit_conditions: ~azure.batch.models.ExitConditions + :param resource_files: A list of files that the Batch service will + download to the Compute Node before running the command line. For + multi-instance Tasks, the resource files will only be downloaded to the + Compute Node on which the primary Task is executed. There is a maximum + size for the list of resource files. When the max size is exceeded, the + request will fail and the response error code will be + RequestEntityTooLarge. If this occurs, the collection of ResourceFiles + must be reduced in size. This can be achieved using .zip files, + Application Packages, or Docker Containers. + :type resource_files: list[~azure.batch.models.ResourceFile] + :param output_files: A list of files that the Batch service will upload + from the Compute Node after running the command line. For multi-instance + Tasks, the files will only be uploaded from the Compute Node on which the + primary Task is executed. + :type output_files: list[~azure.batch.models.OutputFile] + :param environment_settings: A list of environment variable settings for + the Task. + :type environment_settings: list[~azure.batch.models.EnvironmentSetting] + :param affinity_info: A locality hint that can be used by the Batch + service to select a Compute Node on which to start the new Task. 
+ :type affinity_info: ~azure.batch.models.AffinityInformation + :param constraints: The execution constraints that apply to this Task. If + you do not specify constraints, the maxTaskRetryCount is the + maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. + :type constraints: ~azure.batch.models.TaskConstraints + :param user_identity: The user identity under which the Task runs. If + omitted, the Task runs as a non-administrative user unique to the Task. + :type user_identity: ~azure.batch.models.UserIdentity + :param multi_instance_settings: An object that indicates that the Task is + a multi-instance Task, and contains information about how to run the + multi-instance Task. + :type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings + :param depends_on: The Tasks that this Task depends on. This Task will not + be scheduled until all Tasks that it depends on have completed + successfully. If any of those Tasks fail and exhaust their retry counts, + this Task will never be scheduled. If the Job does not have + usesTaskDependencies set to true, and this element is present, the request + fails with error code TaskDependenciesNotSpecifiedOnJob. + :type depends_on: ~azure.batch.models.TaskDependencies + :param application_package_references: A list of Packages that the Batch + service will deploy to the Compute Node before running the command line. + Application packages are downloaded and deployed to a shared directory, + not the Task working directory. Therefore, if a referenced package is + already on the Node, and is up to date, then it is not re-downloaded; the + existing copy on the Compute Node is used. If a referenced Package cannot + be installed, for example because the package has been deleted or because + download failed, the Task fails. 
+ :type application_package_references: + list[~azure.batch.models.ApplicationPackageReference] + :param authentication_token_settings: The settings for an authentication + token that the Task can use to perform Batch service operations. If this + property is set, the Batch service provides the Task with an + authentication token which can be used to authenticate Batch service + operations without requiring an Account access key. The token is provided + via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations + that the Task can carry out using the token depend on the settings. For + example, a Task can request Job permissions in order to add other Tasks to + the Job, or check the status of the Job or of other Tasks under the Job. + :type authentication_token_settings: + ~azure.batch.models.AuthenticationTokenSettings + """ + + _validation = { + 'id': {'required': True}, + 'command_line': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'display_name': {'key': 'displayName', 'type': 'str'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, + 'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'}, + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'}, + 'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'}, + 'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'}, 
+ 'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'}, + } + + def __init__(self, *, id: str, command_line: str, display_name: str=None, container_settings=None, exit_conditions=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, multi_instance_settings=None, depends_on=None, application_package_references=None, authentication_token_settings=None, **kwargs) -> None: + super(TaskAddParameter, self).__init__(**kwargs) + self.id = id + self.display_name = display_name + self.command_line = command_line + self.container_settings = container_settings + self.exit_conditions = exit_conditions + self.resource_files = resource_files + self.output_files = output_files + self.environment_settings = environment_settings + self.affinity_info = affinity_info + self.constraints = constraints + self.user_identity = user_identity + self.multi_instance_settings = multi_instance_settings + self.depends_on = depends_on + self.application_package_references = application_package_references + self.authentication_token_settings = authentication_token_settings diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_result.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_result.py new file mode 100644 index 00000000..fc4261e6 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_result.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single Task added as part of an add Task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add Task request. Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the Task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the Task, if the Task was successfully added. + You can use this to detect whether the Task has changed between requests. + In particular, you can be pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the Job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param location: The URL of the Task, if the Task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the Task. 
+ :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, **kwargs): + super(TaskAddResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.task_id = kwargs.get('task_id', None) + self.e_tag = kwargs.get('e_tag', None) + self.last_modified = kwargs.get('last_modified', None) + self.location = kwargs.get('location', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_add_result_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_add_result_py3.py new file mode 100644 index 00000000..6a26f36f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_add_result_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskAddResult(Model): + """Result for a single Task added as part of an add Task collection operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the add Task request. 
Possible + values include: 'success', 'clientError', 'serverError' + :type status: str or ~azure.batch.models.TaskAddStatus + :param task_id: Required. The ID of the Task for which this is the result. + :type task_id: str + :param e_tag: The ETag of the Task, if the Task was successfully added. + You can use this to detect whether the Task has changed between requests. + In particular, you can be pass the ETag with an Update Task request to + specify that your changes should take effect only if nobody else has + modified the Job in the meantime. + :type e_tag: str + :param last_modified: The last modified time of the Task. + :type last_modified: datetime + :param location: The URL of the Task, if the Task was successfully added. + :type location: str + :param error: The error encountered while attempting to add the Task. + :type error: ~azure.batch.models.BatchError + """ + + _validation = { + 'status': {'required': True}, + 'task_id': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'TaskAddStatus'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'iso-8601'}, + 'location': {'key': 'location', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'BatchError'}, + } + + def __init__(self, *, status, task_id: str, e_tag: str=None, last_modified=None, location: str=None, error=None, **kwargs) -> None: + super(TaskAddResult, self).__init__(**kwargs) + self.status = status + self.task_id = task_id + self.e_tag = e_tag + self.last_modified = last_modified + self.location = location + self.error = error diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_constraints.py b/azext/generated/sdk/batch/v2019_08_01/models/task_constraints.py new file mode 100644 index 00000000..8fea5f57 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_constraints.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a Task. + + :param max_wall_clock_time: The maximum elapsed time that the Task may + run, measured from the time the Task starts. If the Task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the Task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory on + the Compute Node where it ran, from the time it completes execution. After + this time, the Batch service may delete the Task directory and all its + contents. The default is 7 days, i.e. the Task directory will be retained + for 7 days unless the Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + Task executable due to a nonzero exit code. The Batch service will try the + Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the Task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the Task after the first attempt. If the maximum retry + count is -1, the Batch service retries the Task without limit. 
+ :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = kwargs.get('max_wall_clock_time', None) + self.retention_time = kwargs.get('retention_time', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_constraints_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_constraints_py3.py new file mode 100644 index 00000000..70027cf0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_constraints_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskConstraints(Model): + """Execution constraints to apply to a Task. + + :param max_wall_clock_time: The maximum elapsed time that the Task may + run, measured from the time the Task starts. If the Task does not complete + within the time limit, the Batch service terminates it. If this is not + specified, there is no time limit on how long the Task may run. + :type max_wall_clock_time: timedelta + :param retention_time: The minimum time to retain the Task directory on + the Compute Node where it ran, from the time it completes execution. 
After + this time, the Batch service may delete the Task directory and all its + contents. The default is 7 days, i.e. the Task directory will be retained + for 7 days unless the Compute Node is removed or the Job is deleted. + :type retention_time: timedelta + :param max_task_retry_count: The maximum number of times the Task may be + retried. The Batch service retries a Task if its exit code is nonzero. + Note that this value specifically controls the number of retries for the + Task executable due to a nonzero exit code. The Batch service will try the + Task once, and may then retry up to this limit. For example, if the + maximum retry count is 3, Batch tries the Task up to 4 times (one initial + try and 3 retries). If the maximum retry count is 0, the Batch service + does not retry the Task after the first attempt. If the maximum retry + count is -1, the Batch service retries the Task without limit. + :type max_task_retry_count: int + """ + + _attribute_map = { + 'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'}, + 'retention_time': {'key': 'retentionTime', 'type': 'duration'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + } + + def __init__(self, *, max_wall_clock_time=None, retention_time=None, max_task_retry_count: int=None, **kwargs) -> None: + super(TaskConstraints, self).__init__(**kwargs) + self.max_wall_clock_time = max_wall_clock_time + self.retention_time = retention_time + self.max_task_retry_count = max_task_retry_count diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information.py new file mode 100644 index 00000000..153ba043 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a Task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = kwargs.get('container_id', None) + self.state = kwargs.get('state', None) + self.error = kwargs.get('error', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information_py3.py new file mode 100644 index 00000000..04d96708 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_container_execution_information_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerExecutionInformation(Model): + """Contains information about the container which a Task is executing. + + :param container_id: The ID of the container. + :type container_id: str + :param state: The state of the container. This is the state of the + container according to the Docker service. It is equivalent to the status + field returned by "docker inspect". + :type state: str + :param error: Detailed error information about the container. This is the + detailed error string from the Docker service, if available. It is + equivalent to the error field returned by "docker inspect". + :type error: str + """ + + _attribute_map = { + 'container_id': {'key': 'containerId', 'type': 'str'}, + 'state': {'key': 'state', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'str'}, + } + + def __init__(self, *, container_id: str=None, state: str=None, error: str=None, **kwargs) -> None: + super(TaskContainerExecutionInformation, self).__init__(**kwargs) + self.container_id = container_id + self.state = state + self.error = error diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings.py b/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings.py new file mode 100644 index 00000000..210829b2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a Task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The Image to use to create the container in + which the Task will run. This is the full Image reference, as would be + specified to "docker pull". If no tag is provided as part of the Image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container Image. + This setting can be omitted if was already provided at Pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + :param working_directory: The location of the container Task working + directory. The default is 'taskWorkingDirectory'. 
Possible values include: + 'taskWorkingDirectory', 'containerImageDefault' + :type working_directory: str or + ~azure.batch.models.ContainerWorkingDirectory + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, + } + + def __init__(self, **kwargs): + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = kwargs.get('container_run_options', None) + self.image_name = kwargs.get('image_name', None) + self.registry = kwargs.get('registry', None) + self.working_directory = kwargs.get('working_directory', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings_py3.py new file mode 100644 index 00000000..cf8fbca1 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_container_settings_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskContainerSettings(Model): + """The container settings for a Task. + + All required parameters must be populated in order to send to Azure. + + :param container_run_options: Additional options to the container create + command. 
These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The Image to use to create the container in + which the Task will run. This is the full Image reference, as would be + specified to "docker pull". If no tag is provided as part of the Image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container Image. + This setting can be omitted if was already provided at Pool creation. + :type registry: ~azure.batch.models.ContainerRegistry + :param working_directory: The location of the container Task working + directory. The default is 'taskWorkingDirectory'. Possible values include: + 'taskWorkingDirectory', 'containerImageDefault' + :type working_directory: str or + ~azure.batch.models.ContainerWorkingDirectory + """ + + _validation = { + 'image_name': {'required': True}, + } + + _attribute_map = { + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + 'working_directory': {'key': 'workingDirectory', 'type': 'ContainerWorkingDirectory'}, + } + + def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, working_directory=None, **kwargs) -> None: + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = container_run_options + self.image_name = image_name + self.registry = registry + self.working_directory = working_directory diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_counts.py b/azext/generated/sdk/batch/v2019_08_01/models/task_counts.py new file mode 100644 index 00000000..7f40ffef --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_counts.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The Task counts for a Job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of Tasks in the active state. + :type active: int + :param running: Required. The number of Tasks in the running or preparing + state. + :type running: int + :param completed: Required. The number of Tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of Tasks which succeeded. A Task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of Tasks which failed. A Task fails if + its result (found in the executionInfo property) is 'failure'. 
+ :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskCounts, self).__init__(**kwargs) + self.active = kwargs.get('active', None) + self.running = kwargs.get('running', None) + self.completed = kwargs.get('completed', None) + self.succeeded = kwargs.get('succeeded', None) + self.failed = kwargs.get('failed', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_counts_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_counts_py3.py new file mode 100644 index 00000000..55019794 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_counts_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskCounts(Model): + """The Task counts for a Job. + + All required parameters must be populated in order to send to Azure. + + :param active: Required. The number of Tasks in the active state. + :type active: int + :param running: Required. The number of Tasks in the running or preparing + state. + :type running: int + :param completed: Required. 
The number of Tasks in the completed state. + :type completed: int + :param succeeded: Required. The number of Tasks which succeeded. A Task + succeeds if its result (found in the executionInfo property) is 'success'. + :type succeeded: int + :param failed: Required. The number of Tasks which failed. A Task fails if + its result (found in the executionInfo property) is 'failure'. + :type failed: int + """ + + _validation = { + 'active': {'required': True}, + 'running': {'required': True}, + 'completed': {'required': True}, + 'succeeded': {'required': True}, + 'failed': {'required': True}, + } + + _attribute_map = { + 'active': {'key': 'active', 'type': 'int'}, + 'running': {'key': 'running', 'type': 'int'}, + 'completed': {'key': 'completed', 'type': 'int'}, + 'succeeded': {'key': 'succeeded', 'type': 'int'}, + 'failed': {'key': 'failed', 'type': 'int'}, + } + + def __init__(self, *, active: int, running: int, completed: int, succeeded: int, failed: int, **kwargs) -> None: + super(TaskCounts, self).__init__(**kwargs) + self.active = active + self.running = running + self.completed = completed + self.succeeded = succeeded + self.failed = failed diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options.py new file mode 100644 index 00000000..2daf7608 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options_py3.py new file mode 100644 index 00000000..4b836c65 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_delete_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDeleteOptions(Model): + """Additional parameters for delete operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskDeleteOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies.py b/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies.py new file mode 100644 index 00000000..14f17278 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a Task. 
Any Task that is explicitly specified + or within a dependency range must complete before the dependant Task will + be scheduled. + + :param task_ids: The list of Task IDs that this Task depends on. All Tasks + in this list must complete successfully before the dependent Task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all Task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of Task ID ranges that this Task depends + on. All Tasks in all ranges must complete successfully before the + dependent Task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, **kwargs): + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = kwargs.get('task_ids', None) + self.task_id_ranges = kwargs.get('task_id_ranges', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies_py3.py new file mode 100644 index 00000000..b739ef14 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_dependencies_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskDependencies(Model): + """Specifies any dependencies of a Task. Any Task that is explicitly specified + or within a dependency range must complete before the dependant Task will + be scheduled. + + :param task_ids: The list of Task IDs that this Task depends on. All Tasks + in this list must complete successfully before the dependent Task can be + scheduled. The taskIds collection is limited to 64000 characters total + (i.e. the combined length of all Task IDs). If the taskIds collection + exceeds the maximum length, the Add Task request fails with error code + TaskDependencyListTooLong. In this case consider using Task ID ranges + instead. + :type task_ids: list[str] + :param task_id_ranges: The list of Task ID ranges that this Task depends + on. All Tasks in all ranges must complete successfully before the + dependent Task can be scheduled. + :type task_id_ranges: list[~azure.batch.models.TaskIdRange] + """ + + _attribute_map = { + 'task_ids': {'key': 'taskIds', 'type': '[str]'}, + 'task_id_ranges': {'key': 'taskIdRanges', 'type': '[TaskIdRange]'}, + } + + def __init__(self, *, task_ids=None, task_id_ranges=None, **kwargs) -> None: + super(TaskDependencies, self).__init__(**kwargs) + self.task_ids = task_ids + self.task_id_ranges = task_id_ranges diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information.py b/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information.py new file mode 100644 index 00000000..7d35208a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a Task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the Task started running. 'Running' + corresponds to the running state, so if the Task specifies resource files + or Packages, then the start time reflects the time at which the Task + started downloading or deploying these. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. + This property is present only for Tasks that are in the running or + completed state. + :type start_time: datetime + :param end_time: The time at which the Task completed. This property is + set only if the Task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the Task + command line. This property is set only if the Task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the Task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. + :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. 
+ :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the Task has been + requeued by the Batch service as the result of a user request. When the + user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or + when the Job is being disabled, the user can specify that running Tasks on + the Compute Nodes be requeued for execution. This count tracks how many + times the Task has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the Task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. + :type last_requeue_time: datetime + :param result: The result of the Task execution. 
If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, **kwargs): + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.exit_code = kwargs.get('exit_code', None) + self.container_info = kwargs.get('container_info', None) + self.failure_info = kwargs.get('failure_info', None) + self.retry_count = kwargs.get('retry_count', None) + self.last_retry_time = kwargs.get('last_retry_time', None) + self.requeue_count = kwargs.get('requeue_count', None) + self.last_requeue_time = kwargs.get('last_requeue_time', None) + self.result = kwargs.get('result', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information_py3.py new file mode 100644 index 00000000..e36b66b4 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_execution_information_py3.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskExecutionInformation(Model): + """Information about the execution of a Task. + + All required parameters must be populated in order to send to Azure. + + :param start_time: The time at which the Task started running. 'Running' + corresponds to the running state, so if the Task specifies resource files + or Packages, then the start time reflects the time at which the Task + started downloading or deploying these. If the Task has been restarted or + retried, this is the most recent time at which the Task started running. + This property is present only for Tasks that are in the running or + completed state. + :type start_time: datetime + :param end_time: The time at which the Task completed. This property is + set only if the Task is in the Completed state. + :type end_time: datetime + :param exit_code: The exit code of the program specified on the Task + command line. This property is set only if the Task is in the completed + state. In general, the exit code for a process reflects the specific + convention implemented by the application developer for that process. If + you use the exit code value to make decisions in your code, be sure that + you know the exit code convention used by the application process. + However, if the Batch service terminates the Task (due to timeout, or user + termination via the API) you may see an operating system-defined exit + code. 
+ :type exit_code: int + :param container_info: Information about the container under which the + Task is executing. This property is set only if the Task runs in a + container context. + :type container_info: + ~azure.batch.models.TaskContainerExecutionInformation + :param failure_info: Information describing the Task failure, if any. This + property is set only if the Task is in the completed state and encountered + a failure. + :type failure_info: ~azure.batch.models.TaskFailureInformation + :param retry_count: Required. The number of times the Task has been + retried by the Batch service. Task application failures (non-zero exit + code) are retried, pre-processing errors (the Task could not be run) and + file upload errors are not retried. The Batch service will retry the Task + up to the limit specified by the constraints. + :type retry_count: int + :param last_retry_time: The most recent time at which a retry of the Task + started running. This element is present only if the Task was retried + (i.e. retryCount is nonzero). If present, this is typically the same as + startTime, but may be different if the Task has been restarted for reasons + other than retry; for example, if the Compute Node was rebooted during a + retry, then the startTime is updated but the lastRetryTime is not. + :type last_retry_time: datetime + :param requeue_count: Required. The number of times the Task has been + requeued by the Batch service as the result of a user request. When the + user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or + when the Job is being disabled, the user can specify that running Tasks on + the Compute Nodes be requeued for execution. This count tracks how many + times the Task has been requeued for these reasons. + :type requeue_count: int + :param last_requeue_time: The most recent time at which the Task has been + requeued by the Batch service as the result of a user request. This + property is set only if the requeueCount is nonzero. 
+ :type last_requeue_time: datetime + :param result: The result of the Task execution. If the value is 'failed', + then the details of the failure can be found in the failureInfo property. + Possible values include: 'success', 'failure' + :type result: str or ~azure.batch.models.TaskExecutionResult + """ + + _validation = { + 'retry_count': {'required': True}, + 'requeue_count': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'exit_code': {'key': 'exitCode', 'type': 'int'}, + 'container_info': {'key': 'containerInfo', 'type': 'TaskContainerExecutionInformation'}, + 'failure_info': {'key': 'failureInfo', 'type': 'TaskFailureInformation'}, + 'retry_count': {'key': 'retryCount', 'type': 'int'}, + 'last_retry_time': {'key': 'lastRetryTime', 'type': 'iso-8601'}, + 'requeue_count': {'key': 'requeueCount', 'type': 'int'}, + 'last_requeue_time': {'key': 'lastRequeueTime', 'type': 'iso-8601'}, + 'result': {'key': 'result', 'type': 'TaskExecutionResult'}, + } + + def __init__(self, *, retry_count: int, requeue_count: int, start_time=None, end_time=None, exit_code: int=None, container_info=None, failure_info=None, last_retry_time=None, last_requeue_time=None, result=None, **kwargs) -> None: + super(TaskExecutionInformation, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time + self.exit_code = exit_code + self.container_info = container_info + self.failure_info = failure_info + self.retry_count = retry_count + self.last_retry_time = last_retry_time + self.requeue_count = requeue_count + self.last_requeue_time = last_requeue_time + self.result = result diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information.py b/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information.py new file mode 100644 index 00000000..dd8ee9e2 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a Task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Task error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the Task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. 
+ :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, **kwargs): + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = kwargs.get('category', None) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information_py3.py new file mode 100644 index 00000000..f5156975 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_failure_information_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskFailureInformation(Model): + """Information about a Task failure. + + All required parameters must be populated in order to send to Azure. + + :param category: Required. The category of the Task error. Possible values + include: 'userError', 'serverError' + :type category: str or ~azure.batch.models.ErrorCategory + :param code: An identifier for the Task error. Codes are invariant and are + intended to be consumed programmatically. 
+ :type code: str + :param message: A message describing the Task error, intended to be + suitable for display in a user interface. + :type message: str + :param details: A list of additional details related to the error. + :type details: list[~azure.batch.models.NameValuePair] + """ + + _validation = { + 'category': {'required': True}, + } + + _attribute_map = { + 'category': {'key': 'category', 'type': 'ErrorCategory'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[NameValuePair]'}, + } + + def __init__(self, *, category, code: str=None, message: str=None, details=None, **kwargs) -> None: + super(TaskFailureInformation, self).__init__(**kwargs) + self.category = category + self.code = code + self.message = message + self.details = details diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_get_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_get_options.py new file mode 100644 index 00000000..08c1fd8a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_get_options.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. 
Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskGetOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_get_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_get_options_py3.py new file mode 100644 index 00000000..68699028 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_get_options_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskGetOptions(Model): + """Additional parameters for get operation. + + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. 
The operation will be performed only + if the resource on the service has not been modified since the specified + time. + :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, expand: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskGetOptions, self).__init__(**kwargs) + self.select = select + self.expand = expand + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_id_range.py b/azext/generated/sdk/batch/v2019_08_01/models/task_id_range.py new file mode 100644 index 00000000..ffe1ce7a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_id_range.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of Task IDs that a Task can depend on. All Tasks with IDs in the + range must complete successfully before the dependent Task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first Task ID in the range. + :type start: int + :param end: Required. The last Task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TaskIdRange, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.end = kwargs.get('end', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_id_range_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_id_range_py3.py new file mode 100644 index 00000000..a4646459 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_id_range_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskIdRange(Model): + """A range of Task IDs that a Task can depend on. All Tasks with IDs in the + range must complete successfully before the dependent Task can be + scheduled. + + The start and end of the range are inclusive. For example, if a range has + start 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The first Task ID in the range. + :type start: int + :param end: Required. The last Task ID in the range. + :type end: int + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, *, start: int, end: int, **kwargs) -> None: + super(TaskIdRange, self).__init__(**kwargs) + self.start = start + self.end = end diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_information.py b/azext/generated/sdk/batch/v2019_08_01/models/task_information.py new file mode 100644 index 00000000..87f4d9ad --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_information.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a Task running on a Compute Node. 
+ + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the Task. + :type task_url: str + :param job_id: The ID of the Job to which the Task belongs. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param subtask_id: The ID of the subtask if the Task is a multi-instance + Task. + :type subtask_id: int + :param task_state: Required. The current state of the Task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the Task. + :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, **kwargs): + super(TaskInformation, self).__init__(**kwargs) + self.task_url = kwargs.get('task_url', None) + self.job_id = kwargs.get('job_id', None) + self.task_id = kwargs.get('task_id', None) + self.subtask_id = kwargs.get('subtask_id', None) + self.task_state = kwargs.get('task_state', None) + self.execution_info = kwargs.get('execution_info', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_information_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_information_py3.py new file mode 100644 index 00000000..982abfe5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_information_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskInformation(Model): + """Information about a Task running on a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param task_url: The URL of the Task. + :type task_url: str + :param job_id: The ID of the Job to which the Task belongs. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param subtask_id: The ID of the subtask if the Task is a multi-instance + Task. + :type subtask_id: int + :param task_state: Required. The current state of the Task. Possible + values include: 'active', 'preparing', 'running', 'completed' + :type task_state: str or ~azure.batch.models.TaskState + :param execution_info: Information about the execution of the Task. 
+ :type execution_info: ~azure.batch.models.TaskExecutionInformation + """ + + _validation = { + 'task_state': {'required': True}, + } + + _attribute_map = { + 'task_url': {'key': 'taskUrl', 'type': 'str'}, + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'task_id': {'key': 'taskId', 'type': 'str'}, + 'subtask_id': {'key': 'subtaskId', 'type': 'int'}, + 'task_state': {'key': 'taskState', 'type': 'TaskState'}, + 'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'}, + } + + def __init__(self, *, task_state, task_url: str=None, job_id: str=None, task_id: str=None, subtask_id: int=None, execution_info=None, **kwargs) -> None: + super(TaskInformation, self).__init__(**kwargs) + self.task_url = task_url + self.job_id = job_id + self.task_id = task_id + self.subtask_id = subtask_id + self.task_state = task_state + self.execution_info = execution_info diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_list_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_list_options.py new file mode 100644 index 00000000..e666a462 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_list_options.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. 
+ :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListOptions, self).__init__(**kwargs) + self.filter = kwargs.get('filter', None) + self.select = kwargs.get('select', None) + self.expand = kwargs.get('expand', None) + self.max_results = kwargs.get('max_results', 1000) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_list_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_list_options_py3.py new file mode 100644 index 00000000..0c129bf3 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_list_options_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListOptions(Model): + """Additional parameters for list operation. + + :param filter: An OData $filter clause. 
For more information on + constructing this filter, see + https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + :type filter: str + :param select: An OData $select clause. + :type select: str + :param expand: An OData $expand clause. + :type expand: str + :param max_results: The maximum number of items to return in the response. + A maximum of 1000 Tasks can be returned. Default value: 1000 . + :type max_results: int + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'filter': {'key': '', 'type': 'str'}, + 'select': {'key': '', 'type': 'str'}, + 'expand': {'key': '', 'type': 'str'}, + 'max_results': {'key': '', 'type': 'int'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, filter: str=None, select: str=None, expand: str=None, max_results: int=1000, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListOptions, self).__init__(**kwargs) + self.filter = filter + self.select = select + self.expand = expand + self.max_results = max_results + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options.py new file mode 100644 index 00000000..8157cee2 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. 
+ :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = kwargs.get('select', None) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options_py3.py new file mode 100644 index 00000000..b8810800 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_list_subtasks_options_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskListSubtasksOptions(Model): + """Additional parameters for list_subtasks operation. + + :param select: An OData $select clause. + :type select: str + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. 
+ :type ocp_date: datetime + """ + + _attribute_map = { + 'select': {'key': '', 'type': 'str'}, + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, select: str=None, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, **kwargs) -> None: + super(TaskListSubtasksOptions, self).__init__(**kwargs) + self.select = select + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options.py new file mode 100644 index 00000000..fe074611 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. 
+ :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options_py3.py new file mode 100644 index 00000000..bd39d6c9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_reactivate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskReactivateOptions(Model): + """Additional parameters for reactivate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskReactivateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy.py b/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy.py new file mode 100644 index 00000000..dc8d6dcb --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how Tasks should be distributed across Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. Possible values + include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, **kwargs): + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = kwargs.get('node_fill_type', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy_py3.py new file mode 100644 index 00000000..61a47621 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_scheduling_policy_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskSchedulingPolicy(Model): + """Specifies how Tasks should be distributed across Compute Nodes. + + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. 
How Tasks are distributed across Compute + Nodes in a Pool. If not specified, the default is spread. Possible values + include: 'spread', 'pack' + :type node_fill_type: str or ~azure.batch.models.ComputeNodeFillType + """ + + _validation = { + 'node_fill_type': {'required': True}, + } + + _attribute_map = { + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + } + + def __init__(self, *, node_fill_type, **kwargs) -> None: + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = node_fill_type diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/task_statistics.py new file mode 100644 index 00000000..2d2f3169 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_statistics.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a Task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. 
The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the Task. + The wall clock time is the elapsed time from when the Task started running + on a Compute Node to when it finished (or to the last time the statistics + were updated, if the Task had not finished by then). If the Task was + retried, this includes the wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. The total number of disk read operations made + by the Task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the Task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + Task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + Task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the Task. The wait time + for a Task is defined as the elapsed time between the creation of the Task + and the start of Task execution. (If the Task is retried due to failures, + the wait time is the time to the most recent Task execution.). 
+ :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(TaskStatistics, self).__init__(**kwargs) + self.url = kwargs.get('url', None) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.user_cpu_time = kwargs.get('user_cpu_time', None) + self.kernel_cpu_time = kwargs.get('kernel_cpu_time', None) + self.wall_clock_time = kwargs.get('wall_clock_time', None) + self.read_iops = kwargs.get('read_iops', None) + self.write_iops = kwargs.get('write_iops', None) + self.read_io_gi_b = kwargs.get('read_io_gi_b', None) + self.write_io_gi_b = kwargs.get('write_io_gi_b', None) + self.wait_time = kwargs.get('wait_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_statistics_py3.py new file mode 100644 index 
00000000..c9f5c916 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_statistics_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskStatistics(Model): + """Resource usage statistics for a Task. + + All required parameters must be populated in order to send to Azure. + + :param url: Required. The URL of the statistics. + :type url: str + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param user_cpu_time: Required. The total user mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type user_cpu_time: timedelta + :param kernel_cpu_time: Required. The total kernel mode CPU time (summed + across all cores and all Compute Nodes) consumed by the Task. + :type kernel_cpu_time: timedelta + :param wall_clock_time: Required. The total wall clock time of the Task. + The wall clock time is the elapsed time from when the Task started running + on a Compute Node to when it finished (or to the last time the statistics + were updated, if the Task had not finished by then). If the Task was + retried, this includes the wall clock time of all the Task retries. + :type wall_clock_time: timedelta + :param read_iops: Required. 
The total number of disk read operations made + by the Task. + :type read_iops: long + :param write_iops: Required. The total number of disk write operations + made by the Task. + :type write_iops: long + :param read_io_gi_b: Required. The total gibibytes read from disk by the + Task. + :type read_io_gi_b: float + :param write_io_gi_b: Required. The total gibibytes written to disk by the + Task. + :type write_io_gi_b: float + :param wait_time: Required. The total wait time of the Task. The wait time + for a Task is defined as the elapsed time between the creation of the Task + and the start of Task execution. (If the Task is retried due to failures, + the wait time is the time to the most recent Task execution.). + :type wait_time: timedelta + """ + + _validation = { + 'url': {'required': True}, + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'user_cpu_time': {'required': True}, + 'kernel_cpu_time': {'required': True}, + 'wall_clock_time': {'required': True}, + 'read_iops': {'required': True}, + 'write_iops': {'required': True}, + 'read_io_gi_b': {'required': True}, + 'write_io_gi_b': {'required': True}, + 'wait_time': {'required': True}, + } + + _attribute_map = { + 'url': {'key': 'url', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'user_cpu_time': {'key': 'userCPUTime', 'type': 'duration'}, + 'kernel_cpu_time': {'key': 'kernelCPUTime', 'type': 'duration'}, + 'wall_clock_time': {'key': 'wallClockTime', 'type': 'duration'}, + 'read_iops': {'key': 'readIOps', 'type': 'long'}, + 'write_iops': {'key': 'writeIOps', 'type': 'long'}, + 'read_io_gi_b': {'key': 'readIOGiB', 'type': 'float'}, + 'write_io_gi_b': {'key': 'writeIOGiB', 'type': 'float'}, + 'wait_time': {'key': 'waitTime', 'type': 'duration'}, + } + + def __init__(self, *, url: str, start_time, last_update_time, user_cpu_time, kernel_cpu_time, wall_clock_time, read_iops: int, 
write_iops: int, read_io_gi_b: float, write_io_gi_b: float, wait_time, **kwargs) -> None: + super(TaskStatistics, self).__init__(**kwargs) + self.url = url + self.start_time = start_time + self.last_update_time = last_update_time + self.user_cpu_time = user_cpu_time + self.kernel_cpu_time = kernel_cpu_time + self.wall_clock_time = wall_clock_time + self.read_iops = read_iops + self.write_iops = write_iops + self.read_io_gi_b = read_io_gi_b + self.write_io_gi_b = write_io_gi_b + self.wait_time = wait_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options.py new file mode 100644 index 00000000..1908a9da --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. + + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . 
+ :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options_py3.py new file mode 100644 index 00000000..d967db3a --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_terminate_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskTerminateOptions(Model): + """Additional parameters for terminate operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskTerminateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_update_options.py b/azext/generated/sdk/batch/v2019_08_01/models/task_update_options.py new file mode 100644 index 00000000..32e1ad82 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_update_options.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = kwargs.get('timeout', 30) + self.client_request_id = kwargs.get('client_request_id', None) + self.return_client_request_id = kwargs.get('return_client_request_id', False) + self.ocp_date = kwargs.get('ocp_date', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_update_options_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_update_options_py3.py new file mode 100644 index 00000000..2a20ddf5 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_update_options_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateOptions(Model): + """Additional parameters for update operation. 
+ + :param timeout: The maximum time that the server can spend processing the + request, in seconds. The default is 30 seconds. Default value: 30 . + :type timeout: int + :param client_request_id: The caller-generated request identity, in the + form of a GUID with no decoration such as curly braces, e.g. + 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. + :type client_request_id: str + :param return_client_request_id: Whether the server should return the + client-request-id in the response. Default value: False . + :type return_client_request_id: bool + :param ocp_date: The time the request was issued. Client libraries + typically set this to the current system clock time; set it explicitly if + you are calling the REST API directly. + :type ocp_date: datetime + :param if_match: An ETag value associated with the version of the resource + known to the client. The operation will be performed only if the + resource's current ETag on the service exactly matches the value specified + by the client. + :type if_match: str + :param if_none_match: An ETag value associated with the version of the + resource known to the client. The operation will be performed only if the + resource's current ETag on the service does not match the value specified + by the client. + :type if_none_match: str + :param if_modified_since: A timestamp indicating the last modified time of + the resource known to the client. The operation will be performed only if + the resource on the service has been modified since the specified time. + :type if_modified_since: datetime + :param if_unmodified_since: A timestamp indicating the last modified time + of the resource known to the client. The operation will be performed only + if the resource on the service has not been modified since the specified + time. 
+ :type if_unmodified_since: datetime + """ + + _attribute_map = { + 'timeout': {'key': '', 'type': 'int'}, + 'client_request_id': {'key': '', 'type': 'str'}, + 'return_client_request_id': {'key': '', 'type': 'bool'}, + 'ocp_date': {'key': '', 'type': 'rfc-1123'}, + 'if_match': {'key': '', 'type': 'str'}, + 'if_none_match': {'key': '', 'type': 'str'}, + 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, + } + + def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: + super(TaskUpdateOptions, self).__init__(**kwargs) + self.timeout = timeout + self.client_request_id = client_request_id + self.return_client_request_id = return_client_request_id + self.ocp_date = ocp_date + self.if_match = if_match + self.if_none_match = if_none_match + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter.py b/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter.py new file mode 100644 index 00000000..dfbcb1c0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a Task. 
+ + :param constraints: Constraints that apply to this Task. If omitted, the + Task is given the default constraints. For multi-instance Tasks, updating + the retention time applies only to the primary Task and not subtasks. + :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, **kwargs): + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = kwargs.get('constraints', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter_py3.py new file mode 100644 index 00000000..7341a52d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/task_update_parameter_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TaskUpdateParameter(Model): + """The set of changes to be made to a Task. + + :param constraints: Constraints that apply to this Task. If omitted, the + Task is given the default constraints. For multi-instance Tasks, updating + the retention time applies only to the primary Task and not subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + """ + + _attribute_map = { + 'constraints': {'key': 'constraints', 'type': 'TaskConstraints'}, + } + + def __init__(self, *, constraints=None, **kwargs) -> None: + super(TaskUpdateParameter, self).__init__(**kwargs) + self.constraints = constraints diff --git a/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration.py new file mode 100644 index 00000000..ab05cce9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. 
This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. + :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = kwargs.get('container_url', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration_py3.py new file mode 100644 index 00000000..26270c97 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_configuration_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsConfiguration(Model): + """The Azure Batch service log files upload configuration for a Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param container_url: Required. The URL of the container within Azure Blob + Storage to which to upload the Batch Service log file(s). The URL must + include a Shared Access Signature (SAS) granting write permissions to the + container. The SAS duration must allow enough time for the upload to + finish. The start time for SAS is optional and recommended to not be + specified. + :type container_url: str + :param start_time: Required. The start of the time range from which to + upload Batch Service log file(s). Any log file containing a log message in + the time range will be uploaded. This means that the operation might + retrieve more logs than have been requested since the entire log file is + always uploaded, but the operation should not retrieve fewer logs than + have been requested. + :type start_time: datetime + :param end_time: The end of the time range from which to upload Batch + Service log file(s). Any log file containing a log message in the time + range will be uploaded. This means that the operation might retrieve more + logs than have been requested since the entire log file is always + uploaded, but the operation should not retrieve fewer logs than have been + requested. If omitted, the default is to upload all logs available after + the startTime. 
+ :type end_time: datetime + """ + + _validation = { + 'container_url': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'container_url': {'key': 'containerUrl', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, container_url: str, start_time, end_time=None, **kwargs) -> None: + super(UploadBatchServiceLogsConfiguration, self).__init__(**kwargs) + self.container_url = container_url + self.start_time = start_time + self.end_time = end_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result.py b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result.py new file mode 100644 index 00000000..f0928125 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. 
+ :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = kwargs.get('virtual_directory_name', None) + self.number_of_files_uploaded = kwargs.get('number_of_files_uploaded', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result_py3.py new file mode 100644 index 00000000..b23d902e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/upload_batch_service_logs_result_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UploadBatchServiceLogsResult(Model): + """The result of uploading Batch service log files from a specific Compute + Node. + + All required parameters must be populated in order to send to Azure. + + :param virtual_directory_name: Required. The virtual directory within + Azure Blob Storage container to which the Batch Service log file(s) will + be uploaded. 
The virtual directory name is part of the blob name for each + log file uploaded, and it is built based poolId, nodeId and a unique + identifier. + :type virtual_directory_name: str + :param number_of_files_uploaded: Required. The number of log files which + will be uploaded. + :type number_of_files_uploaded: int + """ + + _validation = { + 'virtual_directory_name': {'required': True}, + 'number_of_files_uploaded': {'required': True}, + } + + _attribute_map = { + 'virtual_directory_name': {'key': 'virtualDirectoryName', 'type': 'str'}, + 'number_of_files_uploaded': {'key': 'numberOfFilesUploaded', 'type': 'int'}, + } + + def __init__(self, *, virtual_directory_name: str, number_of_files_uploaded: int, **kwargs) -> None: + super(UploadBatchServiceLogsResult, self).__init__(**kwargs) + self.virtual_directory_name = virtual_directory_name + self.number_of_files_uploaded = number_of_files_uploaded diff --git a/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics.py b/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics.py new file mode 100644 index 00000000..848656b9 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to Pool usage information. + + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. 
+ :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated Compute Node cores being part of the Pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.last_update_time = kwargs.get('last_update_time', None) + self.dedicated_core_time = kwargs.get('dedicated_core_time', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics_py3.py new file mode 100644 index 00000000..24ff9e4c --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/usage_statistics_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UsageStatistics(Model): + """Statistics related to Pool usage information. 
+ + All required parameters must be populated in order to send to Azure. + + :param start_time: Required. The start time of the time range covered by + the statistics. + :type start_time: datetime + :param last_update_time: Required. The time at which the statistics were + last updated. All statistics are limited to the range between startTime + and lastUpdateTime. + :type last_update_time: datetime + :param dedicated_core_time: Required. The aggregated wall-clock time of + the dedicated Compute Node cores being part of the Pool. + :type dedicated_core_time: timedelta + """ + + _validation = { + 'start_time': {'required': True}, + 'last_update_time': {'required': True}, + 'dedicated_core_time': {'required': True}, + } + + _attribute_map = { + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'}, + 'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'}, + } + + def __init__(self, *, start_time, last_update_time, dedicated_core_time, **kwargs) -> None: + super(UsageStatistics, self).__init__(**kwargs) + self.start_time = start_time + self.last_update_time = last_update_time + self.dedicated_core_time = dedicated_core_time diff --git a/azext/generated/sdk/batch/v2019_08_01/models/user_account.py b/azext/generated/sdk/batch/v2019_08_01/models/user_account.py new file mode 100644 index 00000000..c1f7f276 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/user_account.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute Tasks on an Azure Batch + Compute Node. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user Account. + :type name: str + :param password: Required. The password for the user Account. + :type password: str + :param elevation_level: The elevation level of the user Account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user Account. This property is ignored if specified on a Windows Pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user Account. This property can only be specified if the user is + on a Windows Pool. If not specified and on a Windows Pool, the user is + created with the default options. 
+ :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, **kwargs): + super(UserAccount, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.password = kwargs.get('password', None) + self.elevation_level = kwargs.get('elevation_level', None) + self.linux_user_configuration = kwargs.get('linux_user_configuration', None) + self.windows_user_configuration = kwargs.get('windows_user_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/user_account_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/user_account_py3.py new file mode 100644 index 00000000..fc768441 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/user_account_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserAccount(Model): + """Properties used to create a user used to execute Tasks on an Azure Batch + Compute Node. + + All required parameters must be populated in order to send to Azure. 
+ + :param name: Required. The name of the user Account. + :type name: str + :param password: Required. The password for the user Account. + :type password: str + :param elevation_level: The elevation level of the user Account. The + default value is nonAdmin. Possible values include: 'nonAdmin', 'admin' + :type elevation_level: str or ~azure.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user Account. This property is ignored if specified on a Windows Pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user Account. This property can only be specified if the user is + on a Windows Pool. If not specified and on a Windows Pool, the user is + created with the default options. + :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration + """ + + _validation = { + 'name': {'required': True}, + 'password': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + } + + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: + super(UserAccount, self).__init__(**kwargs) + self.name = name + self.password = password + self.elevation_level = elevation_level + self.linux_user_configuration = linux_user_configuration + self.windows_user_configuration = windows_user_configuration diff --git a/azext/generated/sdk/batch/v2019_08_01/models/user_identity.py 
b/azext/generated/sdk/batch/v2019_08_01/models/user_identity.py new file mode 100644 index 00000000..ce8ec66e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/user_identity.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the Task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the Task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the Task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, **kwargs): + super(UserIdentity, self).__init__(**kwargs) + self.user_name = kwargs.get('user_name', None) + self.auto_user = kwargs.get('auto_user', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/user_identity_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/user_identity_py3.py new file mode 100644 index 00000000..bf913010 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/user_identity_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UserIdentity(Model): + """The definition of the user identity under which the Task is run. + + Specify either the userName or autoUser property, but not both. + + :param user_name: The name of the user identity under which the Task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the Task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. 
+ :type auto_user: ~azure.batch.models.AutoUserSpecification + """ + + _attribute_map = { + 'user_name': {'key': 'username', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + } + + def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: + super(UserIdentity, self).__init__(**kwargs) + self.user_name = user_name + self.auto_user = auto_user diff --git a/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration.py new file mode 100644 index 00000000..0a4c4d6e --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch Compute Node + agent to be provisioned on Compute Nodes in the Pool. 
The Batch Compute + Node agent is a program that runs on each Compute Node in the Pool, and + provides the command-and-control interface between the Compute Node and + the Batch service. There are different implementations of the Compute Node + agent, known as SKUs, for different operating systems. You must specify a + Compute Node agent SKU which matches the selected Image reference. To get + the list of supported Compute Node agent SKUs along with their list of + verified Image references, see the 'List supported Compute Node agent + SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS Image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + Compute Nodes in the Pool. This property must be specified if the Compute + Nodes in the Pool need to have empty data disks attached to them. This + cannot be updated. Each Compute Node gets its own disk (the disk is not a + file share). Existing disks cannot be attached, each attached disk is + empty. When the Compute Node is removed from the Pool, the disk and all + data associated with it is also deleted. The disk is not formatted after + being attached, it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. 
This only applies to Images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the Compute Nodes which will be deployed. If + omitted, no on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the Pool. + If specified, setup is performed on each Compute Node in the Pool to allow + Tasks to run in containers. All regular Tasks and Job manager Tasks run on + this Pool must specify the containerSettings property, and all other Tasks + may specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, **kwargs): + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = kwargs.get('image_reference', None) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.windows_configuration = kwargs.get('windows_configuration', None) + self.data_disks = kwargs.get('data_disks', None) + self.license_type = kwargs.get('license_type', None) + self.container_configuration = kwargs.get('container_configuration', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration_py3.py 
b/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration_py3.py new file mode 100644 index 00000000..29a7e9d7 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/virtual_machine_configuration_py3.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class VirtualMachineConfiguration(Model): + """The configuration for Compute Nodes in a Pool based on the Azure Virtual + Machines infrastructure. + + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. + :type image_reference: ~azure.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch Compute Node + agent to be provisioned on Compute Nodes in the Pool. The Batch Compute + Node agent is a program that runs on each Compute Node in the Pool, and + provides the command-and-control interface between the Compute Node and + the Batch service. There are different implementations of the Compute Node + agent, known as SKUs, for different operating systems. You must specify a + Compute Node agent SKU which matches the selected Image reference. To get + the list of supported Compute Node agent SKUs along with their list of + verified Image references, see the 'List supported Compute Node agent + SKUs' operation. 
+ :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + property specifies a Linux OS Image. + :type windows_configuration: ~azure.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + Compute Nodes in the Pool. This property must be specified if the Compute + Nodes in the Pool need to have empty data disks attached to them. This + cannot be updated. Each Compute Node gets its own disk (the disk is not a + file share). Existing disks cannot be attached, each attached disk is + empty. When the Compute Node is removed from the Pool, the disk and all + data associated with it is also deleted. The disk is not formatted after + being attached, it must be formatted before use - for more information see + https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux + and + https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + :type data_disks: list[~azure.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to Images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the Compute Nodes which will be deployed. If + omitted, no on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the Pool. + If specified, setup is performed on each Compute Node in the Pool to allow + Tasks to run in containers. 
All regular Tasks and Job manager Tasks run on + this Pool must specify the containerSettings property, and all other Tasks + may specify it. + :type container_configuration: ~azure.batch.models.ContainerConfiguration + """ + + _validation = { + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, + } + + _attribute_map = { + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + } + + def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = image_reference + self.node_agent_sku_id = node_agent_sku_id + self.windows_configuration = windows_configuration + self.data_disks = data_disks + self.license_type = license_type + self.container_configuration = container_configuration diff --git a/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration.py new file mode 100644 index 00000000..6b27533d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration_py3.py new file mode 100644 index 00000000..40a4aedf --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/windows_configuration_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. + + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. 
+ :type enable_automatic_updates: bool + """ + + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + } + + def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = enable_automatic_updates diff --git a/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration.py b/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration.py new file mode 100644 index 00000000..7695d88d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user Account on a Windows Compute Node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration Pools is 'batch' and for + CloudServiceConfiguration Pools is 'interactive'. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, **kwargs): + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = kwargs.get('login_mode', None) diff --git a/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration_py3.py b/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration_py3.py new file mode 100644 index 00000000..7eaf424f --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/models/windows_user_configuration_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user Account on a Windows Compute Node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration Pools is 'batch' and for + CloudServiceConfiguration Pools is 'interactive'. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, *, login_mode=None, **kwargs) -> None: + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = login_mode diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/__init__.py b/azext/generated/sdk/batch/v2019_08_01/operations/__init__.py new file mode 100644 index 00000000..5b1c54cc --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/__init__.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .application_operations import ApplicationOperations +from .pool_operations import PoolOperations +from .account_operations import AccountOperations +from .job_operations import JobOperations +from .certificate_operations import CertificateOperations +from .file_operations import FileOperations +from .job_schedule_operations import JobScheduleOperations +from .task_operations import TaskOperations +from .compute_node_operations import ComputeNodeOperations + +__all__ = [ + 'ApplicationOperations', + 'PoolOperations', + 'AccountOperations', + 'JobOperations', + 'CertificateOperations', + 'FileOperations', + 'JobScheduleOperations', + 'TaskOperations', + 'ComputeNodeOperations', +] diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/account_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/account_operations.py new file mode 100644 index 00000000..da00b19d --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/account_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class AccountOperations(object): + """AccountOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. 
Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def list_supported_images( + self, account_list_supported_images_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all Virtual Machine Images supported by the Azure Batch service. + + :param account_list_supported_images_options: Additional parameters + for the operation + :type account_list_supported_images_options: + ~azure.batch.models.AccountListSupportedImagesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ImageInformation + :rtype: + ~azure.batch.models.ImageInformationPaged[~azure.batch.models.ImageInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_supported_images_options is not None: + filter = account_list_supported_images_options.filter + max_results = None + if account_list_supported_images_options is not None: + max_results = account_list_supported_images_options.max_results + timeout = None + if account_list_supported_images_options is not None: + timeout = account_list_supported_images_options.timeout + client_request_id = None + if account_list_supported_images_options is not None: + client_request_id = account_list_supported_images_options.client_request_id + return_client_request_id = None + if account_list_supported_images_options is not None: + return_client_request_id = account_list_supported_images_options.return_client_request_id + ocp_date = None + if account_list_supported_images_options is not None: + ocp_date = account_list_supported_images_options.ocp_date + + 
def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_supported_images.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if 
response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ImageInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ImageInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_supported_images.metadata = {'url': '/supportedimages'} + + def list_pool_node_counts( + self, account_list_pool_node_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the number of Compute Nodes in each state, grouped by Pool. + + :param account_list_pool_node_counts_options: Additional parameters + for the operation + :type account_list_pool_node_counts_options: + ~azure.batch.models.AccountListPoolNodeCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of PoolNodeCounts + :rtype: + ~azure.batch.models.PoolNodeCountsPaged[~azure.batch.models.PoolNodeCounts] + :raises: + :class:`BatchErrorException` + """ + filter = None + if account_list_pool_node_counts_options is not None: + filter = account_list_pool_node_counts_options.filter + max_results = None + if account_list_pool_node_counts_options is not None: + max_results = account_list_pool_node_counts_options.max_results + timeout = None + if account_list_pool_node_counts_options is not None: + timeout = account_list_pool_node_counts_options.timeout + client_request_id = None + if account_list_pool_node_counts_options is not None: + client_request_id = account_list_pool_node_counts_options.client_request_id + return_client_request_id = None + if account_list_pool_node_counts_options is not None: + return_client_request_id = account_list_pool_node_counts_options.return_client_request_id + ocp_date = None + if account_list_pool_node_counts_options is not None: + ocp_date = account_list_pool_node_counts_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_pool_node_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=10, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers 
+ header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolNodeCountsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_pool_node_counts.metadata = {'url': '/nodecounts'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/application_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/application_operations.py new file mode 100644 index 00000000..9c0416d0 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/application_operations.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ApplicationOperations(object): + """ApplicationOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def list( + self, application_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the applications available in the specified Account. + + This operation returns only Applications and versions that are + available for use on Compute Nodes; that is, that can be used in an + Package reference. For administrator information about applications and + versions that are not yet available to Compute Nodes, use the Azure + portal or the Azure Resource Manager API. + + :param application_list_options: Additional parameters for the + operation + :type application_list_options: + ~azure.batch.models.ApplicationListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of ApplicationSummary + :rtype: + ~azure.batch.models.ApplicationSummaryPaged[~azure.batch.models.ApplicationSummary] + :raises: + :class:`BatchErrorException` + """ + max_results = None + if application_list_options is not None: + max_results = application_list_options.max_results + timeout = None + if application_list_options is not None: + timeout = application_list_options.timeout + client_request_id = None + if application_list_options is not None: + client_request_id = application_list_options.client_request_id + return_client_request_id = None + if application_list_options is not None: + return_client_request_id = application_list_options.return_client_request_id + ocp_date = None + if application_list_options is not None: + ocp_date = application_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ApplicationSummaryPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/applications'} + + def get( + self, application_id, application_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Application. + + This operation returns only Applications and versions that are + available for use on Compute Nodes; that is, that can be used in an + Package reference. For administrator information about Applications and + versions that are not yet available to Compute Nodes, use the Azure + portal or the Azure Resource Manager API. + + :param application_id: The ID of the Application. 
+ :type application_id: str + :param application_get_options: Additional parameters for the + operation + :type application_get_options: + ~azure.batch.models.ApplicationGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ApplicationSummary or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ApplicationSummary or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if application_get_options is not None: + timeout = application_get_options.timeout + client_request_id = None + if application_get_options is not None: + client_request_id = application_get_options.client_request_id + return_client_request_id = None + if application_get_options is not None: + return_client_request_id = application_get_options.return_client_request_id + ocp_date = None + if application_get_options is not None: + ocp_date = application_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'applicationId': self._serialize.url("application_id", application_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ApplicationSummary', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/applications/{applicationId}'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/certificate_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/certificate_operations.py new file mode 100644 index 00000000..3a2e5eae --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/certificate_operations.py @@ -0,0 +1,514 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class CertificateOperations(object): + """CertificateOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def add( + self, certificate, certificate_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Certificate to the specified Account. + + :param certificate: The Certificate to be added. + :type certificate: ~azure.batch.models.CertificateAddParameter + :param certificate_add_options: Additional parameters for the + operation + :type certificate_add_options: + ~azure.batch.models.CertificateAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_add_options is not None: + timeout = certificate_add_options.timeout + client_request_id = None + if certificate_add_options is not None: + client_request_id = certificate_add_options.client_request_id + return_client_request_id = None + if certificate_add_options is not None: + return_client_request_id = certificate_add_options.return_client_request_id + ocp_date = None + if certificate_add_options is not None: + ocp_date = certificate_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(certificate, 'CertificateAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/certificates'} + + def list( + self, certificate_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Certificates that have been added to the specified + Account. + + :param certificate_list_options: Additional parameters for the + operation + :type certificate_list_options: + ~azure.batch.models.CertificateListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of Certificate + :rtype: + ~azure.batch.models.CertificatePaged[~azure.batch.models.Certificate] + :raises: + :class:`BatchErrorException` + """ + filter = None + if certificate_list_options is not None: + filter = certificate_list_options.filter + select = None + if certificate_list_options is not None: + select = certificate_list_options.select + max_results = None + if certificate_list_options is not None: + max_results = certificate_list_options.max_results + timeout = None + if certificate_list_options is not None: + timeout = certificate_list_options.timeout + client_request_id = None + if certificate_list_options is not None: + client_request_id = certificate_list_options.client_request_id + return_client_request_id = None + if certificate_list_options is not None: + return_client_request_id = certificate_list_options.return_client_request_id + ocp_date = None + if certificate_list_options is not None: + ocp_date = certificate_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = 
{} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/certificates'} + + def cancel_deletion( + self, thumbprint_algorithm, thumbprint, certificate_cancel_deletion_options=None, custom_headers=None, raw=False, **operation_config): + """Cancels a failed deletion of a Certificate from the specified Account. + + If you try to delete a Certificate that is being used by a Pool or + Compute Node, the status of the Certificate changes to deleteFailed. 
If + you decide that you want to continue using the Certificate, you can use + this operation to set the status of the Certificate back to active. If + you intend to delete the Certificate, you do not need to run this + operation after the deletion failed. You must make sure that the + Certificate is not being used by any resources, and then you can try + again to delete the Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate being deleted. + :type thumbprint: str + :param certificate_cancel_deletion_options: Additional parameters for + the operation + :type certificate_cancel_deletion_options: + ~azure.batch.models.CertificateCancelDeletionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_cancel_deletion_options is not None: + timeout = certificate_cancel_deletion_options.timeout + client_request_id = None + if certificate_cancel_deletion_options is not None: + client_request_id = certificate_cancel_deletion_options.client_request_id + return_client_request_id = None + if certificate_cancel_deletion_options is not None: + return_client_request_id = certificate_cancel_deletion_options.return_client_request_id + ocp_date = None + if certificate_cancel_deletion_options is not None: + ocp_date = certificate_cancel_deletion_options.ocp_date + + # Construct URL + url = self.cancel_deletion.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if 
return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + cancel_deletion.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})/canceldelete'} + + def delete( + self, thumbprint_algorithm, thumbprint, certificate_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Certificate from the specified Account. + + You cannot delete a Certificate if a resource (Pool or Compute Node) is + using it. Before you can delete a Certificate, you must therefore make + sure that the Certificate is not associated with any existing Pools, + the Certificate is not installed on any Nodes (even if you remove a + Certificate from a Pool, it is not removed from existing Compute Nodes + in that Pool until they restart), and no running Tasks depend on the + Certificate. If you try to delete a Certificate that is in use, the + deletion fails. The Certificate status changes to deleteFailed. You can + use Cancel Delete Certificate to set the status back to active if you + decide that you want to continue using the Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. 
+ :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate to be deleted. + :type thumbprint: str + :param certificate_delete_options: Additional parameters for the + operation + :type certificate_delete_options: + ~azure.batch.models.CertificateDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if certificate_delete_options is not None: + timeout = certificate_delete_options.timeout + client_request_id = None + if certificate_delete_options is not None: + client_request_id = certificate_delete_options.client_request_id + return_client_request_id = None + if certificate_delete_options is not None: + return_client_request_id = certificate_delete_options.return_client_request_id + ocp_date = None + if certificate_delete_options is not None: + ocp_date = certificate_delete_options.ocp_date + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + delete.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} + + def get( + self, thumbprint_algorithm, thumbprint, certificate_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Certificate. + + :param thumbprint_algorithm: The algorithm used to derive the + thumbprint parameter. This must be sha1. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the Certificate to get. 
+ :type thumbprint: str + :param certificate_get_options: Additional parameters for the + operation + :type certificate_get_options: + ~azure.batch.models.CertificateGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: Certificate or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.Certificate or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if certificate_get_options is not None: + select = certificate_get_options.select + timeout = None + if certificate_get_options is not None: + timeout = certificate_get_options.timeout + client_request_id = None + if certificate_get_options is not None: + client_request_id = certificate_get_options.client_request_id + return_client_request_id = None + if certificate_get_options is not None: + return_client_request_id = certificate_get_options.return_client_request_id + ocp_date = None + if certificate_get_options is not None: + ocp_date = certificate_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), + 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + 
header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('Certificate', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/certificates(thumbprintAlgorithm={thumbprintAlgorithm},thumbprint={thumbprint})'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/compute_node_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/compute_node_operations.py new file mode 100644 index 00000000..42e9c580 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/operations/compute_node_operations.py @@ -0,0 +1,1242 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class ComputeNodeOperations(object): + """ComputeNodeOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def add_user( + self, pool_id, node_id, user, compute_node_add_user_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a user Account to the specified Compute Node. + + You can add a user Account to a Compute Node only when it is in the + idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to create a + user Account. + :type node_id: str + :param user: The user Account to be created. 
+ :type user: ~azure.batch.models.ComputeNodeUser + :param compute_node_add_user_options: Additional parameters for the + operation + :type compute_node_add_user_options: + ~azure.batch.models.ComputeNodeAddUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_add_user_options is not None: + timeout = compute_node_add_user_options.timeout + client_request_id = None + if compute_node_add_user_options is not None: + client_request_id = compute_node_add_user_options.client_request_id + return_client_request_id = None + if compute_node_add_user_options is not None: + return_client_request_id = compute_node_add_user_options.return_client_request_id + ocp_date = None + if compute_node_add_user_options is not None: + ocp_date = compute_node_add_user_options.ocp_date + + # Construct URL + url = self.add_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(user, 'ComputeNodeUser') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users'} + + def delete_user( + self, pool_id, node_id, user_name, compute_node_delete_user_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a user Account from the specified Compute Node. + + You can delete a user Account to a Compute Node only when it is in the + idle or running state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to delete a + user Account. 
+ :type node_id: str + :param user_name: The name of the user Account to delete. + :type user_name: str + :param compute_node_delete_user_options: Additional parameters for the + operation + :type compute_node_delete_user_options: + ~azure.batch.models.ComputeNodeDeleteUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_delete_user_options is not None: + timeout = compute_node_delete_user_options.timeout + client_request_id = None + if compute_node_delete_user_options is not None: + client_request_id = compute_node_delete_user_options.client_request_id + return_client_request_id = None + if compute_node_delete_user_options is not None: + return_client_request_id = compute_node_delete_user_options.return_client_request_id + ocp_date = None + if compute_node_delete_user_options is not None: + ocp_date = compute_node_delete_user_options.ocp_date + + # Construct URL + url = self.delete_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def update_user( + self, pool_id, node_id, user_name, node_update_user_parameter, compute_node_update_user_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the password and expiration time of a user Account on the + specified Compute Node. + + This operation replaces of all the updatable properties of the Account. + For example, if the expiryTime element is not specified, the current + value is replaced with the default value, not left unmodified. You can + update a user Account on a Compute Node only when it is in the idle or + running state. 
+ + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the machine on which you want to update a + user Account. + :type node_id: str + :param user_name: The name of the user Account to update. + :type user_name: str + :param node_update_user_parameter: The parameters for the request. + :type node_update_user_parameter: + ~azure.batch.models.NodeUpdateUserParameter + :param compute_node_update_user_options: Additional parameters for the + operation + :type compute_node_update_user_options: + ~azure.batch.models.ComputeNodeUpdateUserOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_update_user_options is not None: + timeout = compute_node_update_user_options.timeout + client_request_id = None + if compute_node_update_user_options is not None: + client_request_id = compute_node_update_user_options.client_request_id + return_client_request_id = None + if compute_node_update_user_options is not None: + return_client_request_id = compute_node_update_user_options.return_client_request_id + ocp_date = None + if compute_node_update_user_options is not None: + ocp_date = compute_node_update_user_options.ocp_date + + # Construct URL + url = self.update_user.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'userName': self._serialize.url("user_name", user_name, 'str') + } + url = self._client.format_url(url, 
**path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_update_user_parameter, 'NodeUpdateUserParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_user.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/users/{userName}'} + + def get( + 
self, pool_id, node_id, compute_node_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to get + information about. + :type node_id: str + :param compute_node_get_options: Additional parameters for the + operation + :type compute_node_get_options: + ~azure.batch.models.ComputeNodeGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNode or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.ComputeNode or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if compute_node_get_options is not None: + select = compute_node_get_options.select + timeout = None + if compute_node_get_options is not None: + timeout = compute_node_get_options.timeout + client_request_id = None + if compute_node_get_options is not None: + client_request_id = compute_node_get_options.client_request_id + return_client_request_id = None + if compute_node_get_options is not None: + return_client_request_id = compute_node_get_options.return_client_request_id + ocp_date = None + if compute_node_get_options is not None: + ocp_date = compute_node_get_options.ocp_date + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNode', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}'} + + def reboot( + self, pool_id, node_id, 
node_reboot_option=None, compute_node_reboot_options=None, custom_headers=None, raw=False, **operation_config): + """Restarts the specified Compute Node. + + You can restart a Compute Node only if it is in an idle or running + state. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. + :type node_id: str + :param node_reboot_option: When to reboot the Compute Node and what to + do with currently running Tasks. The default value is requeue. + Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reboot_option: str or + ~azure.batch.models.ComputeNodeRebootOption + :param compute_node_reboot_options: Additional parameters for the + operation + :type compute_node_reboot_options: + ~azure.batch.models.ComputeNodeRebootOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reboot_options is not None: + timeout = compute_node_reboot_options.timeout + client_request_id = None + if compute_node_reboot_options is not None: + client_request_id = compute_node_reboot_options.client_request_id + return_client_request_id = None + if compute_node_reboot_options is not None: + return_client_request_id = compute_node_reboot_options.return_client_request_id + ocp_date = None + if compute_node_reboot_options is not None: + ocp_date = compute_node_reboot_options.ocp_date + node_reboot_parameter = None + if node_reboot_option is not None: + node_reboot_parameter = models.NodeRebootParameter(node_reboot_option=node_reboot_option) + + # Construct URL + url = self.reboot.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not 
None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reboot_parameter is not None: + body_content = self._serialize.body(node_reboot_parameter, 'NodeRebootParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reboot.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reboot'} + + def reimage( + self, pool_id, node_id, node_reimage_option=None, compute_node_reimage_options=None, custom_headers=None, raw=False, **operation_config): + """Reinstalls the operating system on the specified Compute Node. + + You can reinstall the operating system on a Compute Node only if it is + in an idle or running state. This API can be invoked only on Pools + created with the cloud service configuration property. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that you want to restart. + :type node_id: str + :param node_reimage_option: When to reimage the Compute Node and what + to do with currently running Tasks. The default value is requeue. 
+ Possible values include: 'requeue', 'terminate', 'taskCompletion', + 'retainedData' + :type node_reimage_option: str or + ~azure.batch.models.ComputeNodeReimageOption + :param compute_node_reimage_options: Additional parameters for the + operation + :type compute_node_reimage_options: + ~azure.batch.models.ComputeNodeReimageOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_reimage_options is not None: + timeout = compute_node_reimage_options.timeout + client_request_id = None + if compute_node_reimage_options is not None: + client_request_id = compute_node_reimage_options.client_request_id + return_client_request_id = None + if compute_node_reimage_options is not None: + return_client_request_id = compute_node_reimage_options.return_client_request_id + ocp_date = None + if compute_node_reimage_options is not None: + ocp_date = compute_node_reimage_options.ocp_date + node_reimage_parameter = None + if node_reimage_option is not None: + node_reimage_parameter = models.NodeReimageParameter(node_reimage_option=node_reimage_option) + + # Construct URL + url = self.reimage.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_reimage_parameter is not None: + body_content = self._serialize.body(node_reimage_parameter, 'NodeReimageParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reimage.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/reimage'} + + def disable_scheduling( + self, pool_id, node_id, node_disable_scheduling_option=None, compute_node_disable_scheduling_options=None, custom_headers=None, raw=False, 
**operation_config): + """Disables Task scheduling on the specified Compute Node. + + You can disable Task scheduling on a Compute Node only if its current + scheduling state is enabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to + disable Task scheduling. + :type node_id: str + :param node_disable_scheduling_option: What to do with currently + running Tasks when disabling Task scheduling on the Compute Node. The + default value is requeue. Possible values include: 'requeue', + 'terminate', 'taskCompletion' + :type node_disable_scheduling_option: str or + ~azure.batch.models.DisableComputeNodeSchedulingOption + :param compute_node_disable_scheduling_options: Additional parameters + for the operation + :type compute_node_disable_scheduling_options: + ~azure.batch.models.ComputeNodeDisableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_disable_scheduling_options is not None: + timeout = compute_node_disable_scheduling_options.timeout + client_request_id = None + if compute_node_disable_scheduling_options is not None: + client_request_id = compute_node_disable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_disable_scheduling_options is not None: + return_client_request_id = compute_node_disable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_disable_scheduling_options is not None: + ocp_date = compute_node_disable_scheduling_options.ocp_date + node_disable_scheduling_parameter = None + if node_disable_scheduling_option is not None: + node_disable_scheduling_parameter = models.NodeDisableSchedulingParameter(node_disable_scheduling_option=node_disable_scheduling_option) + + # Construct URL + url = self.disable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if 
self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + if node_disable_scheduling_parameter is not None: + body_content = self._serialize.body(node_disable_scheduling_parameter, 'NodeDisableSchedulingParameter') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/disablescheduling'} + + def enable_scheduling( + self, pool_id, node_id, compute_node_enable_scheduling_options=None, custom_headers=None, raw=False, **operation_config): + """Enables Task scheduling on the specified Compute Node. + + You can enable Task scheduling on a Compute Node only if its current + scheduling state is disabled. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node on which you want to enable + Task scheduling. 
+ :type node_id: str + :param compute_node_enable_scheduling_options: Additional parameters + for the operation + :type compute_node_enable_scheduling_options: + ~azure.batch.models.ComputeNodeEnableSchedulingOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_enable_scheduling_options is not None: + timeout = compute_node_enable_scheduling_options.timeout + client_request_id = None + if compute_node_enable_scheduling_options is not None: + client_request_id = compute_node_enable_scheduling_options.client_request_id + return_client_request_id = None + if compute_node_enable_scheduling_options is not None: + return_client_request_id = compute_node_enable_scheduling_options.return_client_request_id + ocp_date = None + if compute_node_enable_scheduling_options is not None: + ocp_date = compute_node_enable_scheduling_options.ocp_date + + # Construct URL + url = self.enable_scheduling.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = 
str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_scheduling.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/enablescheduling'} + + def get_remote_login_settings( + self, pool_id, node_id, compute_node_get_remote_login_settings_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the settings required for remote login to a Compute Node. + + Before you can remotely login to a Compute Node using the remote login + settings, you must create a user Account on the Compute Node. This API + can be invoked only on Pools created with the virtual machine + configuration property. For Pools created with a cloud service + configuration, see the GetRemoteDesktop API. + + :param pool_id: The ID of the Pool that contains the Compute Node. 
+ :type pool_id: str + :param node_id: The ID of the Compute Node for which to obtain the + remote login settings. + :type node_id: str + :param compute_node_get_remote_login_settings_options: Additional + parameters for the operation + :type compute_node_get_remote_login_settings_options: + ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ComputeNodeGetRemoteLoginSettingsResult or ClientRawResponse + if raw=true + :rtype: ~azure.batch.models.ComputeNodeGetRemoteLoginSettingsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_login_settings_options is not None: + timeout = compute_node_get_remote_login_settings_options.timeout + client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + client_request_id = compute_node_get_remote_login_settings_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_login_settings_options is not None: + return_client_request_id = compute_node_get_remote_login_settings_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_login_settings_options is not None: + ocp_date = compute_node_get_remote_login_settings_options.ocp_date + + # Construct URL + url = self.get_remote_login_settings.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('ComputeNodeGetRemoteLoginSettingsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_login_settings.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/remoteloginsettings'} + + def get_remote_desktop( + self, pool_id, node_id, 
compute_node_get_remote_desktop_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Gets the Remote Desktop Protocol file for the specified Compute Node. + + Before you can access a Compute Node by using the RDP file, you must + create a user Account on the Compute Node. This API can only be invoked + on Pools created with a cloud service configuration. For Pools created + with a virtual machine configuration, see the GetRemoteLoginSettings + API. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node for which you want to get + the Remote Desktop Protocol file. + :type node_id: str + :param compute_node_get_remote_desktop_options: Additional parameters + for the operation + :type compute_node_get_remote_desktop_options: + ~azure.batch.models.ComputeNodeGetRemoteDesktopOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_get_remote_desktop_options is not None: + timeout = compute_node_get_remote_desktop_options.timeout + client_request_id = None + if compute_node_get_remote_desktop_options is not None: + client_request_id = compute_node_get_remote_desktop_options.client_request_id + return_client_request_id = None + if compute_node_get_remote_desktop_options is not None: + return_client_request_id = compute_node_get_remote_desktop_options.return_client_request_id + ocp_date = None + if compute_node_get_remote_desktop_options is not None: + ocp_date = compute_node_get_remote_desktop_options.ocp_date + + # Construct URL + url = self.get_remote_desktop.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", 
client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_remote_desktop.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/rdp'} + + def upload_batch_service_logs( + self, pool_id, node_id, upload_batch_service_logs_configuration, compute_node_upload_batch_service_logs_options=None, custom_headers=None, raw=False, **operation_config): + """Upload Azure Batch service log files from the specified Compute Node to + Azure Blob Storage. + + This is for gathering Azure Batch service log files in an automated + fashion from Compute Nodes if you are experiencing an error and wish to + escalate to Azure support. The Azure Batch service log files should be + shared with Azure support to aid in debugging issues with the Batch + service. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node from which you want to + upload the Azure Batch service log files. 
+ :type node_id: str + :param upload_batch_service_logs_configuration: The Azure Batch + service log files upload configuration. + :type upload_batch_service_logs_configuration: + ~azure.batch.models.UploadBatchServiceLogsConfiguration + :param compute_node_upload_batch_service_logs_options: Additional + parameters for the operation + :type compute_node_upload_batch_service_logs_options: + ~azure.batch.models.ComputeNodeUploadBatchServiceLogsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: UploadBatchServiceLogsResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.UploadBatchServiceLogsResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if compute_node_upload_batch_service_logs_options is not None: + timeout = compute_node_upload_batch_service_logs_options.timeout + client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + client_request_id = compute_node_upload_batch_service_logs_options.client_request_id + return_client_request_id = None + if compute_node_upload_batch_service_logs_options is not None: + return_client_request_id = compute_node_upload_batch_service_logs_options.return_client_request_id + ocp_date = None + if compute_node_upload_batch_service_logs_options is not None: + ocp_date = compute_node_upload_batch_service_logs_options.ocp_date + + # Construct URL + url = self.upload_batch_service_logs.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct 
parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(upload_batch_service_logs_configuration, 'UploadBatchServiceLogsConfiguration') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('UploadBatchServiceLogsResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + 
client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + upload_batch_service_logs.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/uploadbatchservicelogs'} + + def list( + self, pool_id, compute_node_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the Compute Nodes in the specified Pool. + + :param pool_id: The ID of the Pool from which you want to list Compute + Nodes. + :type pool_id: str + :param compute_node_list_options: Additional parameters for the + operation + :type compute_node_list_options: + ~azure.batch.models.ComputeNodeListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ComputeNode + :rtype: + ~azure.batch.models.ComputeNodePaged[~azure.batch.models.ComputeNode] + :raises: + :class:`BatchErrorException` + """ + filter = None + if compute_node_list_options is not None: + filter = compute_node_list_options.filter + select = None + if compute_node_list_options is not None: + select = compute_node_list_options.select + max_results = None + if compute_node_list_options is not None: + max_results = compute_node_list_options.max_results + timeout = None + if compute_node_list_options is not None: + timeout = compute_node_list_options.timeout + client_request_id = None + if compute_node_list_options is not None: + client_request_id = compute_node_list_options.client_request_id + return_client_request_id = None + if compute_node_list_options is not None: + return_client_request_id = compute_node_list_options.return_client_request_id + ocp_date = None + if compute_node_list_options is not None: + ocp_date = compute_node_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = 
self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, 
stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.ComputeNodePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools/{poolId}/nodes'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/file_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/file_operations.py new file mode 100644 index 00000000..0f808808 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/file_operations.py @@ -0,0 +1,898 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class FileOperations(object): + """FileOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". 
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def delete_from_task( + self, job_id, task_id, file_path, recursive=None, file_delete_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the specified Task file from the Compute Node where the Task + ran. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to delete. + :type task_id: str + :param file_path: The path to the Task file or directory that you want + to delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_task_options: Additional parameters for the + operation + :type file_delete_from_task_options: + ~azure.batch.models.FileDeleteFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_task_options is not None: + timeout = file_delete_from_task_options.timeout + client_request_id = None + if file_delete_from_task_options is not None: + client_request_id = file_delete_from_task_options.client_request_id + return_client_request_id = None + if file_delete_from_task_options is not None: + return_client_request_id = file_delete_from_task_options.return_client_request_id + ocp_date = None + if file_delete_from_task_options is not None: + ocp_date = file_delete_from_task_options.ocp_date + + # Construct URL + url = self.delete_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = 
self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_from_task( + self, job_id, task_id, file_path, file_get_from_task_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to retrieve. + :type task_id: str + :param file_path: The path to the Task file that you want to get the + content of. + :type file_path: str + :param file_get_from_task_options: Additional parameters for the + operation + :type file_get_from_task_options: + ~azure.batch.models.FileGetFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. 
+ :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_task_options is not None: + timeout = file_get_from_task_options.timeout + client_request_id = None + if file_get_from_task_options is not None: + client_request_id = file_get_from_task_options.client_request_id + return_client_request_id = None + if file_get_from_task_options is not None: + return_client_request_id = file_get_from_task_options.return_client_request_id + ocp_date = None + if file_get_from_task_options is not None: + ocp_date = file_get_from_task_options.ocp_date + ocp_range = None + if file_get_from_task_options is not None: + ocp_range = file_get_from_task_options.ocp_range + if_modified_since = None + if file_get_from_task_options is not None: + if_modified_since = file_get_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_from_task_options is not None: + if_unmodified_since = file_get_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 
'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + 
client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def get_properties_from_task( + self, job_id, task_id, file_path, file_get_properties_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified Task file. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose file you want to get the + properties of. + :type task_id: str + :param file_path: The path to the Task file that you want to get the + properties of. + :type file_path: str + :param file_get_properties_from_task_options: Additional parameters + for the operation + :type file_get_properties_from_task_options: + ~azure.batch.models.FileGetPropertiesFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_task_options is not None: + timeout = file_get_properties_from_task_options.timeout + client_request_id = None + if file_get_properties_from_task_options is not None: + client_request_id = file_get_properties_from_task_options.client_request_id + return_client_request_id = None + if file_get_properties_from_task_options is not None: + return_client_request_id = file_get_properties_from_task_options.return_client_request_id + ocp_date = None + if file_get_properties_from_task_options is not None: + ocp_date = file_get_properties_from_task_options.ocp_date + if_modified_since = None + if file_get_properties_from_task_options is not None: + if_modified_since = file_get_properties_from_task_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_task_options is not None: + if_unmodified_since = file_get_properties_from_task_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_task.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files/{filePath}'} + + def delete_from_compute_node( + self, pool_id, node_id, file_path, recursive=None, file_delete_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes the 
specified file from the Compute Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node from which you want to + delete the file. + :type node_id: str + :param file_path: The path to the file or directory that you want to + delete. + :type file_path: str + :param recursive: Whether to delete children of a directory. If the + filePath parameter represents a directory instead of a file, you can + set recursive to true to delete the directory and all of the files and + subdirectories in it. If recursive is false then the directory must be + empty or deletion will fail. + :type recursive: bool + :param file_delete_from_compute_node_options: Additional parameters + for the operation + :type file_delete_from_compute_node_options: + ~azure.batch.models.FileDeleteFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_delete_from_compute_node_options is not None: + timeout = file_delete_from_compute_node_options.timeout + client_request_id = None + if file_delete_from_compute_node_options is not None: + client_request_id = file_delete_from_compute_node_options.client_request_id + return_client_request_id = None + if file_delete_from_compute_node_options is not None: + return_client_request_id = file_delete_from_compute_node_options.return_client_request_id + ocp_date = None + if file_delete_from_compute_node_options is not None: + ocp_date = file_delete_from_compute_node_options.ocp_date + + # Construct URL + url = self.delete_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if 
client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_from_compute_node( + self, pool_id, node_id, file_path, file_get_from_compute_node_options=None, custom_headers=None, raw=False, callback=None, **operation_config): + """Returns the content of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the file. + :type node_id: str + :param file_path: The path to the Compute Node file that you want to + get the content of. + :type file_path: str + :param file_get_from_compute_node_options: Additional parameters for + the operation + :type file_get_from_compute_node_options: + ~azure.batch.models.FileGetFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param callback: When specified, will be called with each chunk of + data that is streamed. 
The callback should take two arguments, the + bytes of the current chunk of data and the response object. If the + data is uploading, response will be None. + :type callback: Callable[Bytes, response=None] + :param operation_config: :ref:`Operation configuration + overrides`. + :return: object or ClientRawResponse if raw=true + :rtype: Generator or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_from_compute_node_options is not None: + timeout = file_get_from_compute_node_options.timeout + client_request_id = None + if file_get_from_compute_node_options is not None: + client_request_id = file_get_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_from_compute_node_options is not None: + return_client_request_id = file_get_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_from_compute_node_options is not None: + ocp_date = file_get_from_compute_node_options.ocp_date + ocp_range = None + if file_get_from_compute_node_options is not None: + ocp_range = file_get_from_compute_node_options.ocp_range + if_modified_since = None + if file_get_from_compute_node_options is not None: + if_modified_since = file_get_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_from_compute_node_options is not None: + if_unmodified_since = file_get_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if ocp_range is not None: + header_parameters['ocp-range'] = self._serialize.header("ocp_range", ocp_range, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=True, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._client.stream_download(response, callback) + header_dict = { + 
'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def get_properties_from_compute_node( + self, pool_id, node_id, file_path, file_get_properties_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the properties of the specified Compute Node file. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node that contains the file. + :type node_id: str + :param file_path: The path to the Compute Node file that you want to + get the properties of. + :type file_path: str + :param file_get_properties_from_compute_node_options: Additional + parameters for the operation + :type file_get_properties_from_compute_node_options: + ~azure.batch.models.FileGetPropertiesFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if file_get_properties_from_compute_node_options is not None: + timeout = file_get_properties_from_compute_node_options.timeout + client_request_id = None + if file_get_properties_from_compute_node_options is not None: + client_request_id = file_get_properties_from_compute_node_options.client_request_id + return_client_request_id = None + if file_get_properties_from_compute_node_options is not None: + return_client_request_id = file_get_properties_from_compute_node_options.return_client_request_id + ocp_date = None + if file_get_properties_from_compute_node_options is not None: + ocp_date = file_get_properties_from_compute_node_options.ocp_date + if_modified_since = None + if file_get_properties_from_compute_node_options is not None: + if_modified_since = file_get_properties_from_compute_node_options.if_modified_since + if_unmodified_since = None + if file_get_properties_from_compute_node_options is not None: + if_unmodified_since = file_get_properties_from_compute_node_options.if_unmodified_since + + # Construct URL + url = self.get_properties_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str'), + 'filePath': self._serialize.url("file_path", file_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if 
self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'ocp-creation-time': 'rfc-1123', + 'ocp-batch-file-isdirectory': 'bool', + 'ocp-batch-file-url': 'str', + 'ocp-batch-file-mode': 'str', + 'Content-Type': 'str', + 'Content-Length': 'long', + }) + return client_raw_response + get_properties_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files/{filePath}'} + + def list_from_task( + self, job_id, task_id, recursive=None, 
file_list_from_task_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the files in a Task's directory on its Compute Node. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task whose files you want to list. + :type task_id: str + :param recursive: Whether to list children of the Task directory. This + parameter can be used in combination with the filter parameter to list + specific type of files. + :type recursive: bool + :param file_list_from_task_options: Additional parameters for the + operation + :type file_list_from_task_options: + ~azure.batch.models.FileListFromTaskOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_task_options is not None: + filter = file_list_from_task_options.filter + max_results = None + if file_list_from_task_options is not None: + max_results = file_list_from_task_options.max_results + timeout = None + if file_list_from_task_options is not None: + timeout = file_list_from_task_options.timeout + client_request_id = None + if file_list_from_task_options is not None: + client_request_id = file_list_from_task_options.client_request_id + return_client_request_id = None + if file_list_from_task_options is not None: + return_client_request_id = file_list_from_task_options.return_client_request_id + ocp_date = None + if file_list_from_task_options is not None: + ocp_date = file_list_from_task_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_task.metadata['url'] + path_format_arguments 
= { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = 
self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_task.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/files'} + + def list_from_compute_node( + self, pool_id, node_id, recursive=None, file_list_from_compute_node_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the files in Task directories on the specified Compute + Node. + + :param pool_id: The ID of the Pool that contains the Compute Node. + :type pool_id: str + :param node_id: The ID of the Compute Node whose files you want to + list. + :type node_id: str + :param recursive: Whether to list children of a directory. + :type recursive: bool + :param file_list_from_compute_node_options: Additional parameters for + the operation + :type file_list_from_compute_node_options: + ~azure.batch.models.FileListFromComputeNodeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of NodeFile + :rtype: + ~azure.batch.models.NodeFilePaged[~azure.batch.models.NodeFile] + :raises: + :class:`BatchErrorException` + """ + filter = None + if file_list_from_compute_node_options is not None: + filter = file_list_from_compute_node_options.filter + max_results = None + if file_list_from_compute_node_options is not None: + max_results = file_list_from_compute_node_options.max_results + timeout = None + if file_list_from_compute_node_options is not None: + timeout = file_list_from_compute_node_options.timeout + client_request_id = None + if file_list_from_compute_node_options is not None: + client_request_id = file_list_from_compute_node_options.client_request_id + return_client_request_id = None + if file_list_from_compute_node_options is not None: + return_client_request_id = file_list_from_compute_node_options.return_client_request_id + ocp_date = None + if file_list_from_compute_node_options is not None: + ocp_date = file_list_from_compute_node_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_compute_node.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str'), + 'nodeId': self._serialize.url("node_id", node_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', 
maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.NodeFilePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.NodeFilePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_compute_node.metadata = {'url': '/pools/{poolId}/nodes/{nodeId}/files'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/job_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/job_operations.py new file mode 100644 index 00000000..1a9edea0 --- /dev/null +++ 
b/azext/generated/sdk/batch/v2019_08_01/operations/job_operations.py @@ -0,0 +1,1439 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobOperations(object): + """JobOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def get_all_lifetime_statistics( + self, job_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the Jobs in the specified + Account. + + Statistics are aggregated across all Jobs that have ever existed in the + Account, from Account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. 
+ + :param job_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type job_get_all_lifetime_statistics_options: + ~azure.batch.models.JobGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: JobStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.JobStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_all_lifetime_statistics_options is not None: + timeout = job_get_all_lifetime_statistics_options.timeout + client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + client_request_id = job_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if job_get_all_lifetime_statistics_options is not None: + return_client_request_id = job_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if job_get_all_lifetime_statistics_options is not None: + ocp_date = job_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if 
custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('JobStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimejobstats'} + + def delete( + self, job_id, job_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Job. + + Deleting a Job also deletes all Tasks that are part of that Job, and + all Job statistics. This also overrides the retention period for Task + data; that is, if the Job contains Tasks which are still retained on + Compute Nodes, the Batch services deletes those Tasks' working + directories and all their contents. 
When a Delete Job request is + received, the Batch service sets the Job to the deleting state. All + update operations on a Job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that + the Job is being deleted. + + :param job_id: The ID of the Job to delete. + :type job_id: str + :param job_delete_options: Additional parameters for the operation + :type job_delete_options: ~azure.batch.models.JobDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_delete_options is not None: + timeout = job_delete_options.timeout + client_request_id = None + if job_delete_options is not None: + client_request_id = job_delete_options.client_request_id + return_client_request_id = None + if job_delete_options is not None: + return_client_request_id = job_delete_options.return_client_request_id + ocp_date = None + if job_delete_options is not None: + ocp_date = job_delete_options.ocp_date + if_match = None + if job_delete_options is not None: + if_match = job_delete_options.if_match + if_none_match = None + if job_delete_options is not None: + if_none_match = job_delete_options.if_none_match + if_modified_since = None + if job_delete_options is not None: + if_modified_since = job_delete_options.if_modified_since + if_unmodified_since = None + if job_delete_options is not None: + if_unmodified_since = job_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", 
job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202, 404]: + raise 
models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobs/{jobId}'} + + def get( + self, job_id, job_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Job. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_get_options: Additional parameters for the operation + :type job_get_options: ~azure.batch.models.JobGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudJob or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJob or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_get_options is not None: + select = job_get_options.select + expand = None + if job_get_options is not None: + expand = job_get_options.expand + timeout = None + if job_get_options is not None: + timeout = job_get_options.timeout + client_request_id = None + if job_get_options is not None: + client_request_id = job_get_options.client_request_id + return_client_request_id = None + if job_get_options is not None: + return_client_request_id = job_get_options.return_client_request_id + ocp_date = None + if job_get_options is not None: + ocp_date = job_get_options.ocp_date + if_match = None + if job_get_options is not None: + if_match = job_get_options.if_match + if_none_match = None + if job_get_options is not None: + if_none_match = job_get_options.if_none_match + if_modified_since = None + if job_get_options is not None: + if_modified_since = job_get_options.if_modified_since + if_unmodified_since = None + if job_get_options 
is not None: + if_unmodified_since = job_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if 
if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudJob', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}'} + + def patch( + self, job_id, job_patch_parameter, job_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job. + + This replaces only the Job properties specified in the request. For + example, if the Job has constraints, and a request does not specify the + constraints element, then the Job keeps the existing constraints. + + :param job_id: The ID of the Job whose properties you want to update. + :type job_id: str + :param job_patch_parameter: The parameters for the request. 
def patch(
        self, job_id, job_patch_parameter, job_patch_options=None, custom_headers=None, raw=False, **operation_config):
    """Updates the properties of the specified Job.

    Only the properties supplied in the request are replaced; any Job
    property omitted from the request (for example, constraints) keeps
    its existing value on the server.

    :param job_id: The ID of the Job whose properties you want to update.
    :type job_id: str
    :param job_patch_parameter: The parameters for the request.
    :type job_patch_parameter: ~azure.batch.models.JobPatchParameter
    :param job_patch_options: Additional parameters for the operation
    :type job_patch_options: ~azure.batch.models.JobPatchOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    opts = job_patch_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None
    if_match = opts.if_match if opts is not None else None
    if_none_match = opts.if_none_match if opts is not None else None
    if_modified_since = opts.if_modified_since if opts is not None else None
    if_unmodified_since = opts.if_unmodified_since if opts is not None else None

    # Construct URL
    url = self._client.format_url(
        self.patch.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers; conditional request headers are sent only when set.
    header_parameters = {
        'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Serialize the body and issue the PATCH.
    body_content = self._serialize.body(job_patch_parameter, 'JobPatchParameter')
    request = self._client.patch(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
patch.metadata = {'url': '/jobs/{jobId}'}
def update(
        self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config):
    """Updates the properties of the specified Job.

    This fully replaces all the updatable properties of the Job: any
    updatable property not supplied in the request (for example,
    constraints) is removed by the Batch service.

    :param job_id: The ID of the Job whose properties you want to update.
    :type job_id: str
    :param job_update_parameter: The parameters for the request.
    :type job_update_parameter: ~azure.batch.models.JobUpdateParameter
    :param job_update_options: Additional parameters for the operation
    :type job_update_options: ~azure.batch.models.JobUpdateOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    opts = job_update_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None
    if_match = opts.if_match if opts is not None else None
    if_none_match = opts.if_none_match if opts is not None else None
    if_modified_since = opts.if_modified_since if opts is not None else None
    if_unmodified_since = opts.if_unmodified_since if opts is not None else None

    # Construct URL
    url = self._client.format_url(
        self.update.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers; conditional request headers are sent only when set.
    header_parameters = {
        'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Serialize the body and issue the PUT (full replace).
    body_content = self._serialize.body(job_update_parameter, 'JobUpdateParameter')
    request = self._client.put(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [200]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
update.metadata = {'url': '/jobs/{jobId}'}
def disable(
        self, job_id, disable_tasks, job_disable_options=None, custom_headers=None, raw=False, **operation_config):
    """Disables the specified Job, preventing new Tasks from running.

    The Batch service immediately moves the Job to the disabling state,
    handles currently running Tasks according to ``disable_tasks``, and
    then moves the Job to the disabled state. No new Tasks are started
    until the Job returns to the active state. Disabling a Job that is
    not active, disabling, or disabled fails with status code 409.

    :param job_id: The ID of the Job to disable.
    :type job_id: str
    :param disable_tasks: What to do with active Tasks associated with
     the Job. Possible values include: 'requeue', 'terminate', 'wait'
    :type disable_tasks: str or ~azure.batch.models.DisableJobOption
    :param job_disable_options: Additional parameters for the operation
    :type job_disable_options: ~azure.batch.models.JobDisableOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    opts = job_disable_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None
    if_match = opts.if_match if opts is not None else None
    if_none_match = opts.if_none_match if opts is not None else None
    if_modified_since = opts.if_modified_since if opts is not None else None
    if_unmodified_since = opts.if_unmodified_since if opts is not None else None
    # Wrap the flat argument in the body model the service expects.
    job_disable_parameter = models.JobDisableParameter(disable_tasks=disable_tasks)

    # Construct URL
    url = self._client.format_url(
        self.disable.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers; conditional request headers are sent only when set.
    header_parameters = {
        'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Serialize the body and issue the POST; the service accepts with 202.
    body_content = self._serialize.body(job_disable_parameter, 'JobDisableParameter')
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [202]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
disable.metadata = {'url': '/jobs/{jobId}/disable'}
def enable(
        self, job_id, job_enable_options=None, custom_headers=None, raw=False, **operation_config):
    """Enables the specified Job, allowing new Tasks to run.

    The Batch service sets a disabled Job to the enabling state; once
    the operation completes the Job moves to the active state and
    scheduling of new Tasks resumes. Tasks cannot remain active for more
    than 180 days, so Tasks added more than 180 days ago will not run
    even after the Job is enabled.

    :param job_id: The ID of the Job to enable.
    :type job_id: str
    :param job_enable_options: Additional parameters for the operation
    :type job_enable_options: ~azure.batch.models.JobEnableOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    opts = job_enable_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None
    if_match = opts.if_match if opts is not None else None
    if_none_match = opts.if_none_match if opts is not None else None
    if_modified_since = opts.if_modified_since if opts is not None else None
    if_unmodified_since = opts.if_unmodified_since if opts is not None else None

    # Construct URL
    url = self._client.format_url(
        self.enable.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers. This operation sends no body, so no Content-Type
    # header is set; conditional request headers are sent only when set.
    header_parameters = {}
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Issue the bodyless POST; the service accepts with 202.
    request = self._client.post(url, query_parameters, header_parameters)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [202]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
enable.metadata = {'url': '/jobs/{jobId}/enable'}
def terminate(
        self, job_id, terminate_reason=None, job_terminate_options=None, custom_headers=None, raw=False, **operation_config):
    """Terminates the specified Job, marking it as completed.

    The Job is set to the terminating state, any running Tasks are
    terminated, required Job release Tasks run, and the Job then moves
    to the completed state. Tasks still in the active state remain
    active; once a Job is terminated no new Tasks can be added and
    remaining active Tasks will not be scheduled.

    :param job_id: The ID of the Job to terminate.
    :type job_id: str
    :param terminate_reason: The text you want to appear as the Job's
     TerminateReason. The default is 'UserTerminate'.
    :type terminate_reason: str
    :param job_terminate_options: Additional parameters for the operation
    :type job_terminate_options: ~azure.batch.models.JobTerminateOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    opts = job_terminate_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None
    if_match = opts.if_match if opts is not None else None
    if_none_match = opts.if_none_match if opts is not None else None
    if_modified_since = opts.if_modified_since if opts is not None else None
    if_unmodified_since = opts.if_unmodified_since if opts is not None else None
    # The body model is only built (and sent) when a reason was supplied.
    job_terminate_parameter = None
    if terminate_reason is not None:
        job_terminate_parameter = models.JobTerminateParameter(terminate_reason=terminate_reason)

    # Construct URL
    url = self._client.format_url(
        self.terminate.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True),
        jobId=self._serialize.url("job_id", job_id, 'str'))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers; conditional request headers are sent only when set.
    header_parameters = {
        'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')
    if if_match is not None:
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
    if if_none_match is not None:
        header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
    if if_modified_since is not None:
        header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
    if if_unmodified_since is not None:
        header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')

    # Serialize the (possibly absent) body and issue the POST.
    if job_terminate_parameter is not None:
        body_content = self._serialize.body(job_terminate_parameter, 'JobTerminateParameter')
    else:
        body_content = None
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [202]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
terminate.metadata = {'url': '/jobs/{jobId}/terminate'}
def add(
        self, job, job_add_options=None, custom_headers=None, raw=False, **operation_config):
    """Adds a Job to the specified Account.

    Work in a Job can be controlled either through a Job Manager Task
    that the Batch service launches when the Job is ready to start, or
    by the user directly adding Tasks to an active Job via the Task
    APIs. Note: avoid including sensitive information such as user names
    or secret project names in Job names; this information may appear in
    telemetry logs accessible to Microsoft Support engineers.

    :param job: The Job to be added.
    :type job: ~azure.batch.models.JobAddParameter
    :param job_add_options: Additional parameters for the operation
    :type job_add_options: ~azure.batch.models.JobAddOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional operation options; every field defaults to None.
    # Add has no conditional-access (If-*) options.
    opts = job_add_options
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None

    # Construct URL (collection endpoint: no jobId path argument).
    url = self._client.format_url(
        self.add.metadata['url'],
        batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True))

    # Construct query parameters
    query_parameters = {
        'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
    }
    if timeout is not None:
        query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

    # Construct headers
    header_parameters = {
        'Content-Type': 'application/json; odata=minimalmetadata; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        header_parameters['client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    if client_request_id is not None:
        header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
    if return_client_request_id is not None:
        header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
    if ocp_date is not None:
        header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

    # Serialize the body and issue the POST; creation returns 201.
    body_content = self._serialize.body(job, 'JobAddParameter')
    request = self._client.post(url, query_parameters, header_parameters, body_content)
    response = self._client.send(request, stream=False, **operation_config)

    if response.status_code not in [201]:
        raise models.BatchErrorException(self._deserialize, response)

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        client_raw_response.add_headers({
            'client-request-id': 'str',
            'request-id': 'str',
            'ETag': 'str',
            'Last-Modified': 'rfc-1123',
            'DataServiceId': 'str',
        })
        return client_raw_response
add.metadata = {'url': '/jobs'}
def list(
        self, job_list_options=None, custom_headers=None, raw=False, **operation_config):
    """Lists all of the Jobs in the specified Account.

    :param job_list_options: Additional parameters for the operation
    :type job_list_options: ~azure.batch.models.JobListOptions
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of CloudJob
    :rtype:
     ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob]
    :raises:
     :class:`BatchErrorException<azure.batch.models.BatchErrorException>`
    """
    # Flatten the optional OData/list options; every field defaults to None.
    # ``filter`` is renamed locally to avoid shadowing the builtin; the
    # serializer label and the '$filter' query key are unchanged.
    opts = job_list_options
    odata_filter = opts.filter if opts is not None else None
    select = opts.select if opts is not None else None
    expand = opts.expand if opts is not None else None
    max_results = opts.max_results if opts is not None else None
    timeout = opts.timeout if opts is not None else None
    client_request_id = opts.client_request_id if opts is not None else None
    return_client_request_id = opts.return_client_request_id if opts is not None else None
    ocp_date = opts.ocp_date if opts is not None else None

    def internal_paging(next_link=None, raw=False):
        # One page per call; the paged iterator drives this with the
        # continuation link returned by the previous page.
        if next_link:
            # Continuation URLs come back fully formed from the service.
            url = next_link
            query_parameters = {}
        else:
            url = self._client.format_url(
                self.list.metadata['url'],
                batchUrl=self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True))
            query_parameters = {
                'api-version': self._serialize.query("self.api_version", self.api_version, 'str'),
            }
            if odata_filter is not None:
                query_parameters['$filter'] = self._serialize.query("filter", odata_filter, 'str')
            if select is not None:
                query_parameters['$select'] = self._serialize.query("select", select, 'str')
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
            if max_results is not None:
                query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            if timeout is not None:
                query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int')

        # Construct headers
        header_parameters = {
            'Accept': 'application/json',
        }
        if self.config.generate_client_request_id:
            header_parameters['client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        if client_request_id is not None:
            header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
        if return_client_request_id is not None:
            header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool')
        if ocp_date is not None:
            header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.BatchErrorException(self._deserialize, response)

        return response

    # Wrap the page fetcher in the lazily-iterating paged collection.
    deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list.metadata = {'url': '/jobs'}
+ :type job_schedule_id: str + :param job_list_from_job_schedule_options: Additional parameters for + the operation + :type job_list_from_job_schedule_options: + ~azure.batch.models.JobListFromJobScheduleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of CloudJob + :rtype: + ~azure.batch.models.CloudJobPaged[~azure.batch.models.CloudJob] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_from_job_schedule_options is not None: + filter = job_list_from_job_schedule_options.filter + select = None + if job_list_from_job_schedule_options is not None: + select = job_list_from_job_schedule_options.select + expand = None + if job_list_from_job_schedule_options is not None: + expand = job_list_from_job_schedule_options.expand + max_results = None + if job_list_from_job_schedule_options is not None: + max_results = job_list_from_job_schedule_options.max_results + timeout = None + if job_list_from_job_schedule_options is not None: + timeout = job_list_from_job_schedule_options.timeout + client_request_id = None + if job_list_from_job_schedule_options is not None: + client_request_id = job_list_from_job_schedule_options.client_request_id + return_client_request_id = None + if job_list_from_job_schedule_options is not None: + return_client_request_id = job_list_from_job_schedule_options.return_client_request_id + ocp_date = None + if job_list_from_job_schedule_options is not None: + ocp_date = job_list_from_job_schedule_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_from_job_schedule.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_from_job_schedule.metadata = {'url': '/jobschedules/{jobScheduleId}/jobs'} + + def list_preparation_and_release_task_status( + self, job_id, job_list_preparation_and_release_task_status_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the execution status of the Job Preparation and Job Release Task + for the specified Job across the Compute Nodes where the Job has run. + + This API returns the Job Preparation and Job Release Task status on all + Compute Nodes that have run the Job Preparation or Job Release Task. + This includes Compute Nodes which have since been removed from the + Pool. If this API is invoked on a Job which has no Job Preparation or + Job Release Task, the Batch service returns HTTP status code 409 + (Conflict) with an error code of JobPreparationTaskNotSpecified. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_list_preparation_and_release_task_status_options: + Additional parameters for the operation + :type job_list_preparation_and_release_task_status_options: + ~azure.batch.models.JobListPreparationAndReleaseTaskStatusOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of + JobPreparationAndReleaseTaskExecutionInformation + :rtype: + ~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformationPaged[~azure.batch.models.JobPreparationAndReleaseTaskExecutionInformation] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_list_preparation_and_release_task_status_options is not None: + filter = job_list_preparation_and_release_task_status_options.filter + select = None + if job_list_preparation_and_release_task_status_options is not None: + select = job_list_preparation_and_release_task_status_options.select + max_results = None + if job_list_preparation_and_release_task_status_options is not None: + max_results = job_list_preparation_and_release_task_status_options.max_results + timeout = None + if job_list_preparation_and_release_task_status_options is not None: + timeout = job_list_preparation_and_release_task_status_options.timeout + client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + client_request_id = job_list_preparation_and_release_task_status_options.client_request_id + return_client_request_id = None + if job_list_preparation_and_release_task_status_options is not None: + return_client_request_id = job_list_preparation_and_release_task_status_options.return_client_request_id + ocp_date = None + if job_list_preparation_and_release_task_status_options is not None: + ocp_date = job_list_preparation_and_release_task_status_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_preparation_and_release_task_status.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + 
client_raw_response = models.JobPreparationAndReleaseTaskExecutionInformationPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_preparation_and_release_task_status.metadata = {'url': '/jobs/{jobId}/jobpreparationandreleasetaskstatus'} + + def get_task_counts( + self, job_id, job_get_task_counts_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the Task counts for the specified Job. + + Task counts provide a count of the Tasks by active, running or + completed Task state, and a count of Tasks which succeeded or failed. + Tasks in the preparing state are counted as running. + + :param job_id: The ID of the Job. + :type job_id: str + :param job_get_task_counts_options: Additional parameters for the + operation + :type job_get_task_counts_options: + ~azure.batch.models.JobGetTaskCountsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskCounts or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskCounts or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_get_task_counts_options is not None: + timeout = job_get_task_counts_options.timeout + client_request_id = None + if job_get_task_counts_options is not None: + client_request_id = job_get_task_counts_options.client_request_id + return_client_request_id = None + if job_get_task_counts_options is not None: + return_client_request_id = job_get_task_counts_options.return_client_request_id + ocp_date = None + if job_get_task_counts_options is not None: + ocp_date = job_get_task_counts_options.ocp_date + + # Construct URL + url = self.get_task_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = 
self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskCounts', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_task_counts.metadata = {'url': '/jobs/{jobId}/taskcounts'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/job_schedule_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/job_schedule_operations.py new file mode 100644 index 00000000..2f1f87f8 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/job_schedule_operations.py @@ -0,0 +1,1093 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class JobScheduleOperations(object): + """JobScheduleOperations operations. + + :param client: Client for service requests. 
+ :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def exists( + self, job_schedule_id, job_schedule_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Checks the specified Job Schedule exists. + + :param job_schedule_id: The ID of the Job Schedule which you want to + check. + :type job_schedule_id: str + :param job_schedule_exists_options: Additional parameters for the + operation + :type job_schedule_exists_options: + ~azure.batch.models.JobScheduleExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_exists_options is not None: + timeout = job_schedule_exists_options.timeout + client_request_id = None + if job_schedule_exists_options is not None: + client_request_id = job_schedule_exists_options.client_request_id + return_client_request_id = None + if job_schedule_exists_options is not None: + return_client_request_id = job_schedule_exists_options.return_client_request_id + ocp_date = None + if job_schedule_exists_options is not None: + ocp_date = job_schedule_exists_options.ocp_date + if_match = None + if job_schedule_exists_options is not None: + if_match = job_schedule_exists_options.if_match + if_none_match = None + if job_schedule_exists_options is not None: + if_none_match = job_schedule_exists_options.if_none_match + if_modified_since = None + if job_schedule_exists_options is not None: + if_modified_since = job_schedule_exists_options.if_modified_since + if_unmodified_since = None + if job_schedule_exists_options is not None: + if_unmodified_since = job_schedule_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def delete( + self, job_schedule_id, job_schedule_delete_options=None, custom_headers=None, 
raw=False, **operation_config): + """Deletes a Job Schedule from the specified Account. + + When you delete a Job Schedule, this also deletes all Jobs and Tasks + under that schedule. When Tasks are deleted, all the files in their + working directories on the Compute Nodes are also deleted (the + retention period is ignored). The Job Schedule statistics are no longer + accessible once the Job Schedule is deleted, though they are still + counted towards Account lifetime statistics. + + :param job_schedule_id: The ID of the Job Schedule to delete. + :type job_schedule_id: str + :param job_schedule_delete_options: Additional parameters for the + operation + :type job_schedule_delete_options: + ~azure.batch.models.JobScheduleDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_delete_options is not None: + timeout = job_schedule_delete_options.timeout + client_request_id = None + if job_schedule_delete_options is not None: + client_request_id = job_schedule_delete_options.client_request_id + return_client_request_id = None + if job_schedule_delete_options is not None: + return_client_request_id = job_schedule_delete_options.return_client_request_id + ocp_date = None + if job_schedule_delete_options is not None: + ocp_date = job_schedule_delete_options.ocp_date + if_match = None + if job_schedule_delete_options is not None: + if_match = job_schedule_delete_options.if_match + if_none_match = None + if job_schedule_delete_options is not None: + if_none_match = job_schedule_delete_options.if_none_match + if_modified_since = None + if job_schedule_delete_options is not None: + if_modified_since = 
job_schedule_delete_options.if_modified_since + if_unmodified_since = None + if job_schedule_delete_options is not None: + if_unmodified_since = job_schedule_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def get( + self, job_schedule_id, job_schedule_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to get. + :type job_schedule_id: str + :param job_schedule_get_options: Additional parameters for the + operation + :type job_schedule_get_options: + ~azure.batch.models.JobScheduleGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudJobSchedule or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudJobSchedule or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if job_schedule_get_options is not None: + select = job_schedule_get_options.select + expand = None + if job_schedule_get_options is not None: + expand = job_schedule_get_options.expand + timeout = None + if job_schedule_get_options is not None: + timeout = job_schedule_get_options.timeout + client_request_id = None + if job_schedule_get_options is not None: + client_request_id = job_schedule_get_options.client_request_id + return_client_request_id = None + if job_schedule_get_options is not None: + return_client_request_id = job_schedule_get_options.return_client_request_id + ocp_date = None + if job_schedule_get_options is not None: + ocp_date = job_schedule_get_options.ocp_date + if_match = None + if job_schedule_get_options is not None: + if_match = job_schedule_get_options.if_match + if_none_match = None + if job_schedule_get_options is not None: + if_none_match = job_schedule_get_options.if_none_match + if_modified_since = None + if job_schedule_get_options is not None: + if_modified_since = job_schedule_get_options.if_modified_since + if_unmodified_since = None + if job_schedule_get_options is not None: + if_unmodified_since = job_schedule_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 
'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} 
+ + if response.status_code == 200: + deserialized = self._deserialize('CloudJobSchedule', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def patch( + self, job_schedule_id, job_schedule_patch_parameter, job_schedule_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job Schedule. + + This replaces only the Job Schedule properties specified in the + request. For example, if the schedule property is not specified with + this request, then the Batch service will keep the existing schedule. + Changes to a Job Schedule only impact Jobs created by the schedule + after the update has taken place; currently running Jobs are + unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. + :type job_schedule_id: str + :param job_schedule_patch_parameter: The parameters for the request. + :type job_schedule_patch_parameter: + ~azure.batch.models.JobSchedulePatchParameter + :param job_schedule_patch_options: Additional parameters for the + operation + :type job_schedule_patch_options: + ~azure.batch.models.JobSchedulePatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_patch_options is not None: + timeout = job_schedule_patch_options.timeout + client_request_id = None + if job_schedule_patch_options is not None: + client_request_id = job_schedule_patch_options.client_request_id + return_client_request_id = None + if job_schedule_patch_options is not None: + return_client_request_id = job_schedule_patch_options.return_client_request_id + ocp_date = None + if job_schedule_patch_options is not None: + ocp_date = job_schedule_patch_options.ocp_date + if_match = None + if job_schedule_patch_options is not None: + if_match = job_schedule_patch_options.if_match + if_none_match = None + if job_schedule_patch_options is not None: + if_none_match = job_schedule_patch_options.if_none_match + if_modified_since = None + if job_schedule_patch_options is not None: + if_modified_since = job_schedule_patch_options.if_modified_since + if_unmodified_since = None + if job_schedule_patch_options is not None: + if_unmodified_since = job_schedule_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_patch_parameter, 'JobSchedulePatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
patch.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def update( + self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Job Schedule. + + This fully replaces all the updatable properties of the Job Schedule. + For example, if the schedule property is not specified with this + request, then the Batch service will remove the existing schedule. + Changes to a Job Schedule only impact Jobs created by the schedule + after the update has taken place; currently running Jobs are + unaffected. + + :param job_schedule_id: The ID of the Job Schedule to update. + :type job_schedule_id: str + :param job_schedule_update_parameter: The parameters for the request. + :type job_schedule_update_parameter: + ~azure.batch.models.JobScheduleUpdateParameter + :param job_schedule_update_options: Additional parameters for the + operation + :type job_schedule_update_options: + ~azure.batch.models.JobScheduleUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_update_options is not None: + timeout = job_schedule_update_options.timeout + client_request_id = None + if job_schedule_update_options is not None: + client_request_id = job_schedule_update_options.client_request_id + return_client_request_id = None + if job_schedule_update_options is not None: + return_client_request_id = job_schedule_update_options.return_client_request_id + ocp_date = None + if job_schedule_update_options is not None: + ocp_date = job_schedule_update_options.ocp_date + if_match = None + if job_schedule_update_options is not None: + if_match = job_schedule_update_options.if_match + if_none_match = None + if job_schedule_update_options is not None: + if_none_match = job_schedule_update_options.if_none_match + if_modified_since = None + if job_schedule_update_options is not None: + if_modified_since = job_schedule_update_options.if_modified_since + if_unmodified_since = None + if job_schedule_update_options is not None: + if_unmodified_since = job_schedule_update_options.if_unmodified_since + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + 
header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(job_schedule_update_parameter, 'JobScheduleUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + 
update.metadata = {'url': '/jobschedules/{jobScheduleId}'} + + def disable( + self, job_schedule_id, job_schedule_disable_options=None, custom_headers=None, raw=False, **operation_config): + """Disables a Job Schedule. + + No new Jobs will be created until the Job Schedule is enabled again. + + :param job_schedule_id: The ID of the Job Schedule to disable. + :type job_schedule_id: str + :param job_schedule_disable_options: Additional parameters for the + operation + :type job_schedule_disable_options: + ~azure.batch.models.JobScheduleDisableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_disable_options is not None: + timeout = job_schedule_disable_options.timeout + client_request_id = None + if job_schedule_disable_options is not None: + client_request_id = job_schedule_disable_options.client_request_id + return_client_request_id = None + if job_schedule_disable_options is not None: + return_client_request_id = job_schedule_disable_options.return_client_request_id + ocp_date = None + if job_schedule_disable_options is not None: + ocp_date = job_schedule_disable_options.ocp_date + if_match = None + if job_schedule_disable_options is not None: + if_match = job_schedule_disable_options.if_match + if_none_match = None + if job_schedule_disable_options is not None: + if_none_match = job_schedule_disable_options.if_none_match + if_modified_since = None + if job_schedule_disable_options is not None: + if_modified_since = job_schedule_disable_options.if_modified_since + if_unmodified_since = None + if job_schedule_disable_options is not None: + if_unmodified_since = 
job_schedule_disable_options.if_unmodified_since + + # Construct URL + url = self.disable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable.metadata = {'url': '/jobschedules/{jobScheduleId}/disable'} + + def enable( + self, job_schedule_id, job_schedule_enable_options=None, custom_headers=None, raw=False, **operation_config): + """Enables a Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to enable. + :type job_schedule_id: str + :param job_schedule_enable_options: Additional parameters for the + operation + :type job_schedule_enable_options: + ~azure.batch.models.JobScheduleEnableOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_enable_options is not None: + timeout = job_schedule_enable_options.timeout + client_request_id = None + if job_schedule_enable_options is not None: + client_request_id = job_schedule_enable_options.client_request_id + return_client_request_id = None + if job_schedule_enable_options is not None: + return_client_request_id = job_schedule_enable_options.return_client_request_id + ocp_date = None + if job_schedule_enable_options is not None: + ocp_date = job_schedule_enable_options.ocp_date + if_match = None + if job_schedule_enable_options is not None: + if_match = job_schedule_enable_options.if_match + if_none_match = None + if job_schedule_enable_options is not None: + if_none_match = job_schedule_enable_options.if_none_match + if_modified_since = None + if job_schedule_enable_options is not None: + if_modified_since = job_schedule_enable_options.if_modified_since + if_unmodified_since = None + if job_schedule_enable_options is not None: + if_unmodified_since = job_schedule_enable_options.if_unmodified_since + + # Construct URL + url = self.enable.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable.metadata = {'url': '/jobschedules/{jobScheduleId}/enable'} + + def terminate( + self, job_schedule_id, job_schedule_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates a 
Job Schedule. + + :param job_schedule_id: The ID of the Job Schedule to terminates. + :type job_schedule_id: str + :param job_schedule_terminate_options: Additional parameters for the + operation + :type job_schedule_terminate_options: + ~azure.batch.models.JobScheduleTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_terminate_options is not None: + timeout = job_schedule_terminate_options.timeout + client_request_id = None + if job_schedule_terminate_options is not None: + client_request_id = job_schedule_terminate_options.client_request_id + return_client_request_id = None + if job_schedule_terminate_options is not None: + return_client_request_id = job_schedule_terminate_options.return_client_request_id + ocp_date = None + if job_schedule_terminate_options is not None: + ocp_date = job_schedule_terminate_options.ocp_date + if_match = None + if job_schedule_terminate_options is not None: + if_match = job_schedule_terminate_options.if_match + if_none_match = None + if job_schedule_terminate_options is not None: + if_none_match = job_schedule_terminate_options.if_none_match + if_modified_since = None + if job_schedule_terminate_options is not None: + if_modified_since = job_schedule_terminate_options.if_modified_since + if_unmodified_since = None + if job_schedule_terminate_options is not None: + if_unmodified_since = job_schedule_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobScheduleId': 
self._serialize.url("job_schedule_id", job_schedule_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code 
not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobschedules/{jobScheduleId}/terminate'} + + def add( + self, cloud_job_schedule, job_schedule_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Job Schedule to the specified Account. + + :param cloud_job_schedule: The Job Schedule to be added. + :type cloud_job_schedule: ~azure.batch.models.JobScheduleAddParameter + :param job_schedule_add_options: Additional parameters for the + operation + :type job_schedule_add_options: + ~azure.batch.models.JobScheduleAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if job_schedule_add_options is not None: + timeout = job_schedule_add_options.timeout + client_request_id = None + if job_schedule_add_options is not None: + client_request_id = job_schedule_add_options.client_request_id + return_client_request_id = None + if job_schedule_add_options is not None: + return_client_request_id = job_schedule_add_options.return_client_request_id + ocp_date = None + if job_schedule_add_options is not None: + ocp_date = job_schedule_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date 
is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(cloud_job_schedule, 'JobScheduleAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobschedules'} + + def list( + self, job_schedule_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Job Schedules in the specified Account. + + :param job_schedule_list_options: Additional parameters for the + operation + :type job_schedule_list_options: + ~azure.batch.models.JobScheduleListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudJobSchedule + :rtype: + ~azure.batch.models.CloudJobSchedulePaged[~azure.batch.models.CloudJobSchedule] + :raises: + :class:`BatchErrorException` + """ + filter = None + if job_schedule_list_options is not None: + filter = job_schedule_list_options.filter + select = None + if job_schedule_list_options is not None: + select = job_schedule_list_options.select + expand = None + if job_schedule_list_options is not None: + expand = job_schedule_list_options.expand + max_results = None + if job_schedule_list_options is not None: + max_results = job_schedule_list_options.max_results + timeout = None + if job_schedule_list_options is not None: + timeout = job_schedule_list_options.timeout + client_request_id = None + if job_schedule_list_options is not None: + client_request_id = job_schedule_list_options.client_request_id + return_client_request_id = None + if job_schedule_list_options is not None: + return_client_request_id = job_schedule_list_options.return_client_request_id + ocp_date = None + if job_schedule_list_options is not None: + ocp_date = job_schedule_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = 
self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudJobSchedulePaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobschedules'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/pool_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/pool_operations.py new file mode 100644 index 00000000..db1b2ae8 
--- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/pool_operations.py @@ -0,0 +1,1635 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class PoolOperations(object): + """PoolOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def list_usage_metrics( + self, pool_list_usage_metrics_options=None, custom_headers=None, raw=False, **operation_config): + """Lists the usage metrics, aggregated by Pool across individual time + intervals, for the specified Account. + + If you do not specify a $filter clause including a poolId, the response + includes all Pools that existed in the Account in the time range of the + returned aggregation intervals. If you do not specify a $filter clause + including a startTime or endTime these filters default to the start and + end times of the last aggregation interval currently available; that + is, only the last aggregation interval is returned. 
+ + :param pool_list_usage_metrics_options: Additional parameters for the + operation + :type pool_list_usage_metrics_options: + ~azure.batch.models.PoolListUsageMetricsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of PoolUsageMetrics + :rtype: + ~azure.batch.models.PoolUsageMetricsPaged[~azure.batch.models.PoolUsageMetrics] + :raises: + :class:`BatchErrorException` + """ + start_time = None + if pool_list_usage_metrics_options is not None: + start_time = pool_list_usage_metrics_options.start_time + end_time = None + if pool_list_usage_metrics_options is not None: + end_time = pool_list_usage_metrics_options.end_time + filter = None + if pool_list_usage_metrics_options is not None: + filter = pool_list_usage_metrics_options.filter + max_results = None + if pool_list_usage_metrics_options is not None: + max_results = pool_list_usage_metrics_options.max_results + timeout = None + if pool_list_usage_metrics_options is not None: + timeout = pool_list_usage_metrics_options.timeout + client_request_id = None + if pool_list_usage_metrics_options is not None: + client_request_id = pool_list_usage_metrics_options.client_request_id + return_client_request_id = None + if pool_list_usage_metrics_options is not None: + return_client_request_id = pool_list_usage_metrics_options.return_client_request_id + ocp_date = None + if pool_list_usage_metrics_options is not None: + ocp_date = pool_list_usage_metrics_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list_usage_metrics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) 
+ + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if start_time is not None: + query_parameters['starttime'] = self._serialize.query("start_time", start_time, 'iso-8601') + if end_time is not None: + query_parameters['endtime'] = self._serialize.query("end_time", end_time, 'iso-8601') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + 
+ # Deserialize response + deserialized = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.PoolUsageMetricsPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list_usage_metrics.metadata = {'url': '/poolusagemetrics'} + + def get_all_lifetime_statistics( + self, pool_get_all_lifetime_statistics_options=None, custom_headers=None, raw=False, **operation_config): + """Gets lifetime summary statistics for all of the Pools in the specified + Account. + + Statistics are aggregated across all Pools that have ever existed in + the Account, from Account creation to the last update time of the + statistics. The statistics may not be immediately available. The Batch + service performs periodic roll-up of statistics. The typical delay is + about 30 minutes. + + :param pool_get_all_lifetime_statistics_options: Additional parameters + for the operation + :type pool_get_all_lifetime_statistics_options: + ~azure.batch.models.PoolGetAllLifetimeStatisticsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PoolStatistics or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.PoolStatistics or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_get_all_lifetime_statistics_options is not None: + timeout = pool_get_all_lifetime_statistics_options.timeout + client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + client_request_id = pool_get_all_lifetime_statistics_options.client_request_id + return_client_request_id = None + if pool_get_all_lifetime_statistics_options is not None: + return_client_request_id = pool_get_all_lifetime_statistics_options.return_client_request_id + ocp_date = None + if pool_get_all_lifetime_statistics_options is not None: + ocp_date = pool_get_all_lifetime_statistics_options.ocp_date + + # Construct URL + url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not 
None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('PoolStatistics', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get_all_lifetime_statistics.metadata = {'url': '/lifetimepoolstats'} + + def add( + self, pool, pool_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Pool to the specified Account. + + When naming Pools, avoid including sensitive information such as user + names or secret project names. This information may appear in telemetry + logs accessible to Microsoft Support engineers. + + :param pool: The Pool to be added. + :type pool: ~azure.batch.models.PoolAddParameter + :param pool_add_options: Additional parameters for the operation + :type pool_add_options: ~azure.batch.models.PoolAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_add_options is not None: + timeout = pool_add_options.timeout + client_request_id = None + if pool_add_options is not None: + client_request_id = pool_add_options.client_request_id + return_client_request_id = None + if pool_add_options is not None: + return_client_request_id = pool_add_options.return_client_request_id + ocp_date = None + if pool_add_options is not None: + ocp_date = pool_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = 
self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool, 'PoolAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/pools'} + + def list( + self, pool_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Pools in the specified Account. + + :param pool_list_options: Additional parameters for the operation + :type pool_list_options: ~azure.batch.models.PoolListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudPool + :rtype: + ~azure.batch.models.CloudPoolPaged[~azure.batch.models.CloudPool] + :raises: + :class:`BatchErrorException` + """ + filter = None + if pool_list_options is not None: + filter = pool_list_options.filter + select = None + if pool_list_options is not None: + select = pool_list_options.select + expand = None + if pool_list_options is not None: + expand = pool_list_options.expand + max_results = None + if pool_list_options is not None: + max_results = pool_list_options.max_results + timeout = None + if pool_list_options is not None: + timeout = pool_list_options.timeout + client_request_id = None + if pool_list_options is not None: + client_request_id = pool_list_options.client_request_id + return_client_request_id = None + if pool_list_options is not None: + return_client_request_id = pool_list_options.return_client_request_id + ocp_date = None + if pool_list_options is not None: + ocp_date = pool_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudPoolPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/pools'} + + def delete( + self, pool_id, pool_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Pool from the specified Account. 
+ + When you request that a Pool be deleted, the following actions occur: + the Pool state is set to deleting; any ongoing resize operation on the + Pool are stopped; the Batch service starts resizing the Pool to zero + Compute Nodes; any Tasks running on existing Compute Nodes are + terminated and requeued (as if a resize Pool operation had been + requested with the default requeue option); finally, the Pool is + removed from the system. Because running Tasks are requeued, the user + can rerun these Tasks by updating their Job to target a different Pool. + The Tasks can then run on the new Pool. If you want to override the + requeue behavior, then you should call resize Pool explicitly to shrink + the Pool to zero size before deleting the Pool. If you call an Update, + Patch or Delete API on a Pool in the deleting state, it will fail with + HTTP status code 409 with error code PoolBeingDeleted. + + :param pool_id: The ID of the Pool to delete. + :type pool_id: str + :param pool_delete_options: Additional parameters for the operation + :type pool_delete_options: ~azure.batch.models.PoolDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_delete_options is not None: + timeout = pool_delete_options.timeout + client_request_id = None + if pool_delete_options is not None: + client_request_id = pool_delete_options.client_request_id + return_client_request_id = None + if pool_delete_options is not None: + return_client_request_id = pool_delete_options.return_client_request_id + ocp_date = None + if pool_delete_options is not None: + ocp_date = pool_delete_options.ocp_date + if_match = None + if pool_delete_options is not None: + if_match = pool_delete_options.if_match + if_none_match = None + if pool_delete_options is not None: + if_none_match = pool_delete_options.if_none_match + if_modified_since = None + if pool_delete_options is not None: + if_modified_since = pool_delete_options.if_modified_since + if_unmodified_since = None + if pool_delete_options is not None: + if_unmodified_since = pool_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + delete.metadata = {'url': '/pools/{poolId}'} + + def exists( + self, pool_id, pool_exists_options=None, custom_headers=None, raw=False, **operation_config): + """Gets basic properties of a Pool. + + :param pool_id: The ID of the Pool to get. 
+ :type pool_id: str + :param pool_exists_options: Additional parameters for the operation + :type pool_exists_options: ~azure.batch.models.PoolExistsOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: bool or ClientRawResponse if raw=true + :rtype: bool or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_exists_options is not None: + timeout = pool_exists_options.timeout + client_request_id = None + if pool_exists_options is not None: + client_request_id = pool_exists_options.client_request_id + return_client_request_id = None + if pool_exists_options is not None: + return_client_request_id = pool_exists_options.return_client_request_id + ocp_date = None + if pool_exists_options is not None: + ocp_date = pool_exists_options.ocp_date + if_match = None + if pool_exists_options is not None: + if_match = pool_exists_options.if_match + if_none_match = None + if pool_exists_options is not None: + if_none_match = pool_exists_options.if_none_match + if_modified_since = None + if pool_exists_options is not None: + if_modified_since = pool_exists_options.if_modified_since + if_unmodified_since = None + if pool_exists_options is not None: + if_unmodified_since = pool_exists_options.if_unmodified_since + + # Construct URL + url = self.exists.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.head(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 404]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = (response.status_code == 200) + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 
'rfc-1123', + }) + return client_raw_response + return deserialized + exists.metadata = {'url': '/pools/{poolId}'} + + def get( + self, pool_id, pool_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Pool. + + :param pool_id: The ID of the Pool to get. + :type pool_id: str + :param pool_get_options: Additional parameters for the operation + :type pool_get_options: ~azure.batch.models.PoolGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudPool or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudPool or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if pool_get_options is not None: + select = pool_get_options.select + expand = None + if pool_get_options is not None: + expand = pool_get_options.expand + timeout = None + if pool_get_options is not None: + timeout = pool_get_options.timeout + client_request_id = None + if pool_get_options is not None: + client_request_id = pool_get_options.client_request_id + return_client_request_id = None + if pool_get_options is not None: + return_client_request_id = pool_get_options.return_client_request_id + ocp_date = None + if pool_get_options is not None: + ocp_date = pool_get_options.ocp_date + if_match = None + if pool_get_options is not None: + if_match = pool_get_options.if_match + if_none_match = None + if pool_get_options is not None: + if_none_match = pool_get_options.if_none_match + if_modified_since = None + if pool_get_options is not None: + if_modified_since = pool_get_options.if_modified_since + if_unmodified_since = None + if pool_get_options is not None: + if_unmodified_since = pool_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudPool', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/pools/{poolId}'} + + def patch( + self, pool_id, pool_patch_parameter, pool_patch_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Pool. + + This only replaces the Pool properties specified in the request. For + example, if the Pool has a StartTask associated with it, and a request + does not specify a StartTask element, then the Pool keeps the existing + StartTask. + + :param pool_id: The ID of the Pool to update. + :type pool_id: str + :param pool_patch_parameter: The parameters for the request. + :type pool_patch_parameter: ~azure.batch.models.PoolPatchParameter + :param pool_patch_options: Additional parameters for the operation + :type pool_patch_options: ~azure.batch.models.PoolPatchOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_patch_options is not None: + timeout = pool_patch_options.timeout + client_request_id = None + if pool_patch_options is not None: + client_request_id = pool_patch_options.client_request_id + return_client_request_id = None + if pool_patch_options is not None: + return_client_request_id = pool_patch_options.return_client_request_id + ocp_date = None + if pool_patch_options is not None: + ocp_date = pool_patch_options.ocp_date + if_match = None + if pool_patch_options is not None: + if_match = pool_patch_options.if_match + if_none_match = None + if pool_patch_options is not None: + if_none_match = pool_patch_options.if_none_match + if_modified_since = None + if pool_patch_options is not None: + if_modified_since = pool_patch_options.if_modified_since + if_unmodified_since = None + if pool_patch_options is not None: + if_unmodified_since = pool_patch_options.if_unmodified_since + + # Construct URL + url = self.patch.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not 
None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_patch_parameter, 'PoolPatchParameter') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + patch.metadata = {'url': '/pools/{poolId}'} + + def disable_auto_scale( + self, pool_id, pool_disable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + 
"""Disables automatic scaling for a Pool. + + :param pool_id: The ID of the Pool on which to disable automatic + scaling. + :type pool_id: str + :param pool_disable_auto_scale_options: Additional parameters for the + operation + :type pool_disable_auto_scale_options: + ~azure.batch.models.PoolDisableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_disable_auto_scale_options is not None: + timeout = pool_disable_auto_scale_options.timeout + client_request_id = None + if pool_disable_auto_scale_options is not None: + client_request_id = pool_disable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_disable_auto_scale_options is not None: + return_client_request_id = pool_disable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_disable_auto_scale_options is not None: + ocp_date = pool_disable_auto_scale_options.ocp_date + + # Construct URL + url = self.disable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) 
+ if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + disable_auto_scale.metadata = {'url': '/pools/{poolId}/disableautoscale'} + + def enable_auto_scale( + self, pool_id, auto_scale_formula=None, auto_scale_evaluation_interval=None, pool_enable_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Enables automatic scaling for a Pool. + + You cannot enable automatic scaling on a Pool if a resize operation is + in progress on the Pool. If automatic scaling of the Pool is currently + disabled, you must specify a valid autoscale formula as part of the + request. If automatic scaling of the Pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You + cannot call this API for the same Pool more than once every 30 seconds. 
+ + :param pool_id: The ID of the Pool on which to enable automatic + scaling. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + Compute Nodes in the Pool. The formula is checked for validity before + it is applied to the Pool. If the formula is not valid, the Batch + service rejects the request with detailed error information. For more + information about specifying this formula, see Automatically scale + Compute Nodes in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param auto_scale_evaluation_interval: The time interval at which to + automatically adjust the Pool size according to the autoscale formula. + The default value is 15 minutes. The minimum and maximum value are 5 + minutes and 168 hours respectively. If you specify a value less than 5 + minutes or greater than 168 hours, the Batch service rejects the + request with an invalid property value error; if you are calling the + REST API directly, the HTTP status code is 400 (Bad Request). If you + specify a new interval, then the existing autoscale evaluation + schedule will be stopped and a new autoscale evaluation schedule will + be started, with its starting time being the time when this request + was issued. + :type auto_scale_evaluation_interval: timedelta + :param pool_enable_auto_scale_options: Additional parameters for the + operation + :type pool_enable_auto_scale_options: + ~azure.batch.models.PoolEnableAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_enable_auto_scale_options is not None: + timeout = pool_enable_auto_scale_options.timeout + client_request_id = None + if pool_enable_auto_scale_options is not None: + client_request_id = pool_enable_auto_scale_options.client_request_id + return_client_request_id = None + if pool_enable_auto_scale_options is not None: + return_client_request_id = pool_enable_auto_scale_options.return_client_request_id + ocp_date = None + if pool_enable_auto_scale_options is not None: + ocp_date = pool_enable_auto_scale_options.ocp_date + if_match = None + if pool_enable_auto_scale_options is not None: + if_match = pool_enable_auto_scale_options.if_match + if_none_match = None + if pool_enable_auto_scale_options is not None: + if_none_match = pool_enable_auto_scale_options.if_none_match + if_modified_since = None + if pool_enable_auto_scale_options is not None: + if_modified_since = pool_enable_auto_scale_options.if_modified_since + if_unmodified_since = None + if pool_enable_auto_scale_options is not None: + if_unmodified_since = pool_enable_auto_scale_options.if_unmodified_since + pool_enable_auto_scale_parameter = models.PoolEnableAutoScaleParameter(auto_scale_formula=auto_scale_formula, auto_scale_evaluation_interval=auto_scale_evaluation_interval) + + # Construct URL + url = self.enable_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_enable_auto_scale_parameter, 'PoolEnableAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + enable_auto_scale.metadata = {'url': '/pools/{poolId}/enableautoscale'} + + def evaluate_auto_scale( + self, pool_id, auto_scale_formula, pool_evaluate_auto_scale_options=None, custom_headers=None, raw=False, **operation_config): + """Gets the result of evaluating an automatic scaling formula on the Pool. + + This API is primarily for validating an autoscale formula, as it simply + returns the result without applying the formula to the Pool. The Pool + must have auto scaling enabled in order to evaluate a formula. + + :param pool_id: The ID of the Pool on which to evaluate the automatic + scaling formula. + :type pool_id: str + :param auto_scale_formula: The formula for the desired number of + Compute Nodes in the Pool. The formula is validated and its results + calculated, but it is not applied to the Pool. To apply the formula to + the Pool, 'Enable automatic scaling on a Pool'. For more information + about specifying this formula, see Automatically scale Compute Nodes + in an Azure Batch Pool + (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + :type auto_scale_formula: str + :param pool_evaluate_auto_scale_options: Additional parameters for the + operation + :type pool_evaluate_auto_scale_options: + ~azure.batch.models.PoolEvaluateAutoScaleOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: AutoScaleRun or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.AutoScaleRun or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_evaluate_auto_scale_options is not None: + timeout = pool_evaluate_auto_scale_options.timeout + client_request_id = None + if pool_evaluate_auto_scale_options is not None: + client_request_id = pool_evaluate_auto_scale_options.client_request_id + return_client_request_id = None + if pool_evaluate_auto_scale_options is not None: + return_client_request_id = pool_evaluate_auto_scale_options.return_client_request_id + ocp_date = None + if pool_evaluate_auto_scale_options is not None: + ocp_date = pool_evaluate_auto_scale_options.ocp_date + pool_evaluate_auto_scale_parameter = models.PoolEvaluateAutoScaleParameter(auto_scale_formula=auto_scale_formula) + + # Construct URL + url = self.evaluate_auto_scale.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 
'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_evaluate_auto_scale_parameter, 'PoolEvaluateAutoScaleParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('AutoScaleRun', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + evaluate_auto_scale.metadata = {'url': '/pools/{poolId}/evaluateautoscale'} + + def resize( + self, pool_id, pool_resize_parameter, pool_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Changes the number of Compute Nodes that are assigned to a Pool. + + You can only resize a Pool when its allocation state is steady. If the + Pool is already resizing, the request fails with status code 409. When + you resize a Pool, the Pool's allocation state changes from steady to + resizing. You cannot resize Pools which are configured for automatic + scaling. If you try to do this, the Batch service returns an error 409. 
+ If you resize a Pool downwards, the Batch service chooses which Compute + Nodes to remove. To remove specific Compute Nodes, use the Pool remove + Compute Nodes API instead. + + :param pool_id: The ID of the Pool to resize. + :type pool_id: str + :param pool_resize_parameter: The parameters for the request. + :type pool_resize_parameter: ~azure.batch.models.PoolResizeParameter + :param pool_resize_options: Additional parameters for the operation + :type pool_resize_options: ~azure.batch.models.PoolResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_resize_options is not None: + timeout = pool_resize_options.timeout + client_request_id = None + if pool_resize_options is not None: + client_request_id = pool_resize_options.client_request_id + return_client_request_id = None + if pool_resize_options is not None: + return_client_request_id = pool_resize_options.return_client_request_id + ocp_date = None + if pool_resize_options is not None: + ocp_date = pool_resize_options.ocp_date + if_match = None + if pool_resize_options is not None: + if_match = pool_resize_options.if_match + if_none_match = None + if pool_resize_options is not None: + if_none_match = pool_resize_options.if_none_match + if_modified_since = None + if pool_resize_options is not None: + if_modified_since = pool_resize_options.if_modified_since + if_unmodified_since = None + if pool_resize_options is not None: + if_unmodified_since = pool_resize_options.if_unmodified_since + + # Construct URL + url = self.resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 
'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_resize_parameter, 
'PoolResizeParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + resize.metadata = {'url': '/pools/{poolId}/resize'} + + def stop_resize( + self, pool_id, pool_stop_resize_options=None, custom_headers=None, raw=False, **operation_config): + """Stops an ongoing resize operation on the Pool. + + This does not restore the Pool to its previous state before the resize + operation: it only stops any further changes being made, and the Pool + maintains its current state. After stopping, the Pool stabilizes at the + number of Compute Nodes it was at when the stop operation was done. + During the stop operation, the Pool allocation state changes first to + stopping and then to steady. A resize operation need not be an explicit + resize Pool request; this API can also be used to halt the initial + sizing of the Pool when it is created. + + :param pool_id: The ID of the Pool whose resizing you want to stop. + :type pool_id: str + :param pool_stop_resize_options: Additional parameters for the + operation + :type pool_stop_resize_options: + ~azure.batch.models.PoolStopResizeOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_stop_resize_options is not None: + timeout = pool_stop_resize_options.timeout + client_request_id = None + if pool_stop_resize_options is not None: + client_request_id = pool_stop_resize_options.client_request_id + return_client_request_id = None + if pool_stop_resize_options is not None: + return_client_request_id = pool_stop_resize_options.return_client_request_id + ocp_date = None + if pool_stop_resize_options is not None: + ocp_date = pool_stop_resize_options.ocp_date + if_match = None + if pool_stop_resize_options is not None: + if_match = pool_stop_resize_options.if_match + if_none_match = None + if pool_stop_resize_options is not None: + if_none_match = pool_stop_resize_options.if_none_match + if_modified_since = None + if pool_stop_resize_options is not None: + if_modified_since = pool_stop_resize_options.if_modified_since + if_unmodified_since = None + if pool_stop_resize_options is not None: + if_unmodified_since = pool_stop_resize_options.if_unmodified_since + + # Construct URL + url = self.stop_resize.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is 
not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + stop_resize.metadata = {'url': '/pools/{poolId}/stopresize'} + + def update_properties( + self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Pool. 
+ + This fully replaces all the updatable properties of the Pool. For + example, if the Pool has a StartTask associated with it and if + StartTask is not specified with this request, then the Batch service + will remove the existing StartTask. + + :param pool_id: The ID of the Pool to update. + :type pool_id: str + :param pool_update_properties_parameter: The parameters for the + request. + :type pool_update_properties_parameter: + ~azure.batch.models.PoolUpdatePropertiesParameter + :param pool_update_properties_options: Additional parameters for the + operation + :type pool_update_properties_options: + ~azure.batch.models.PoolUpdatePropertiesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_update_properties_options is not None: + timeout = pool_update_properties_options.timeout + client_request_id = None + if pool_update_properties_options is not None: + client_request_id = pool_update_properties_options.client_request_id + return_client_request_id = None + if pool_update_properties_options is not None: + return_client_request_id = pool_update_properties_options.return_client_request_id + ocp_date = None + if pool_update_properties_options is not None: + ocp_date = pool_update_properties_options.ocp_date + + # Construct URL + url = self.update_properties.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + 
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(pool_update_properties_parameter, 'PoolUpdatePropertiesParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} + + def remove_nodes( + self, pool_id, node_remove_parameter, 
pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): + """Removes Compute Nodes from the specified Pool. + + This operation can only run when the allocation state of the Pool is + steady. When this operation runs, the allocation state changes from + steady to resizing. + + :param pool_id: The ID of the Pool from which you want to remove + Compute Nodes. + :type pool_id: str + :param node_remove_parameter: The parameters for the request. + :type node_remove_parameter: ~azure.batch.models.NodeRemoveParameter + :param pool_remove_nodes_options: Additional parameters for the + operation + :type pool_remove_nodes_options: + ~azure.batch.models.PoolRemoveNodesOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if pool_remove_nodes_options is not None: + timeout = pool_remove_nodes_options.timeout + client_request_id = None + if pool_remove_nodes_options is not None: + client_request_id = pool_remove_nodes_options.client_request_id + return_client_request_id = None + if pool_remove_nodes_options is not None: + return_client_request_id = pool_remove_nodes_options.return_client_request_id + ocp_date = None + if pool_remove_nodes_options is not None: + ocp_date = pool_remove_nodes_options.ocp_date + if_match = None + if pool_remove_nodes_options is not None: + if_match = pool_remove_nodes_options.if_match + if_none_match = None + if pool_remove_nodes_options is not None: + if_none_match = pool_remove_nodes_options.if_none_match + if_modified_since = None + if pool_remove_nodes_options is not None: + if_modified_since = pool_remove_nodes_options.if_modified_since + if_unmodified_since = None + if 
pool_remove_nodes_options is not None: + if_unmodified_since = pool_remove_nodes_options.if_unmodified_since + + # Construct URL + url = self.remove_nodes.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'poolId': self._serialize.url("pool_id", pool_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(node_remove_parameter, 'NodeRemoveParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + remove_nodes.metadata = {'url': '/pools/{poolId}/removenodes'} diff --git a/azext/generated/sdk/batch/v2019_08_01/operations/task_operations.py b/azext/generated/sdk/batch/v2019_08_01/operations/task_operations.py new file mode 100644 index 00000000..41d72859 --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/operations/task_operations.py @@ -0,0 +1,1027 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse + +from .. import models + + +class TaskOperations(object): + """TaskOperations operations. + + :param client: Client for service requests. + :param config: Configuration of service client. 
+ :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API Version. Constant value: "2019-08-01.10.0". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2019-08-01.10.0" + + self.config = config + + def add( + self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a Task to the specified Job. + + The maximum lifetime of a Task from addition to completion is 180 days. + If a Task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the Job to which the Task is to be added. + :type job_id: str + :param task: The Task to be added. + :type task: ~azure.batch.models.TaskAddParameter + :param task_add_options: Additional parameters for the operation + :type task_add_options: ~azure.batch.models.TaskAddOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_options is not None: + timeout = task_add_options.timeout + client_request_id = None + if task_add_options is not None: + client_request_id = task_add_options.client_request_id + return_client_request_id = None + if task_add_options is not None: + return_client_request_id = task_add_options.return_client_request_id + ocp_date = None + if task_add_options is not None: + ocp_date = task_add_options.ocp_date + + # Construct URL + url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not 
None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task, 'TaskAddParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [201]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + add.metadata = {'url': '/jobs/{jobId}/tasks'} + + def list( + self, job_id, task_list_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the Tasks that are associated with the specified Job. + + For multi-instance Tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary Task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the Job. + :type job_id: str + :param task_list_options: Additional parameters for the operation + :type task_list_options: ~azure.batch.models.TaskListOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of CloudTask + :rtype: + ~azure.batch.models.CloudTaskPaged[~azure.batch.models.CloudTask] + :raises: + :class:`BatchErrorException` + """ + filter = None + if task_list_options is not None: + filter = task_list_options.filter + select = None + if task_list_options is not None: + select = task_list_options.select + expand = None + if task_list_options is not None: + expand = task_list_options.expand + max_results = None + if task_list_options is not None: + max_results = task_list_options.max_results + timeout = None + if task_list_options is not None: + timeout = task_list_options.timeout + client_request_id = None + if task_list_options is not None: + client_request_id = task_list_options.client_request_id + return_client_request_id = None + if task_list_options is not None: + return_client_request_id = task_list_options.return_client_request_id + ocp_date = None + if task_list_options is not None: + ocp_date = task_list_options.ocp_date + + def internal_paging(next_link=None, raw=False): + + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if filter is not None: + query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if max_results is not None: + query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1) + if timeout 
is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + return response + + # Deserialize response + deserialized = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies) + + if raw: + header_dict = {} + client_raw_response = models.CloudTaskPaged(internal_paging, self._deserialize.dependencies, header_dict) + return client_raw_response + + return deserialized + list.metadata = {'url': '/jobs/{jobId}/tasks'} + + def add_collection( + self, job_id, value, task_add_collection_options=None, custom_headers=None, raw=False, **operation_config): + """Adds a collection of Tasks to the specified Job. + + Note that each Task must have a unique ID. 
The Batch service may not + return the results for each Task in the same order the Tasks were + submitted in this request. If the server times out or the connection is + closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the + request. Note that it is up to the user to correctly handle failures + when re-issuing a request. For example, you should use the same Task + IDs during a retry so that if the prior operation succeeded, the retry + will not create extra Tasks unexpectedly. If the response contains any + Tasks which failed to add, a client can retry the request. In a retry, + it is most efficient to resubmit only Tasks that failed to add, and to + omit Tasks that were successfully added on the first attempt. The + maximum lifetime of a Task from addition to completion is 180 days. If + a Task has not completed within 180 days of being added it will be + terminated by the Batch service and left in whatever state it was in at + that time. + + :param job_id: The ID of the Job to which the Task collection is to be + added. + :type job_id: str + :param value: The collection of Tasks to add. The maximum count of + Tasks is 100. The total serialized size of this collection must be + less than 1MB. If it is greater than 1MB (for example if each Task has + 100's of resource files or environment variables), the request will + fail with code 'RequestBodyTooLarge' and should be retried again with + fewer Tasks. + :type value: list[~azure.batch.models.TaskAddParameter] + :param task_add_collection_options: Additional parameters for the + operation + :type task_add_collection_options: + ~azure.batch.models.TaskAddCollectionOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: TaskAddCollectionResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.TaskAddCollectionResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_add_collection_options is not None: + timeout = task_add_collection_options.timeout + client_request_id = None + if task_add_collection_options is not None: + client_request_id = task_add_collection_options.client_request_id + return_client_request_id = None + if task_add_collection_options is not None: + return_client_request_id = task_add_collection_options.return_client_request_id + ocp_date = None + if task_add_collection_options is not None: + ocp_date = task_add_collection_options.ocp_date + task_collection = models.TaskAddCollectionParameter(value=value) + + # Construct URL + url = self.add_collection.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + 
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_collection, 'TaskAddCollectionParameter') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('TaskAddCollectionResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} + + def delete( + self, job_id, task_id, task_delete_options=None, custom_headers=None, raw=False, **operation_config): + """Deletes a Task from the specified Job. + + When a Task is deleted, all of the files in its directory on the + Compute Node where it ran are also deleted (regardless of the retention + time). For multi-instance Tasks, the delete Task operation applies + synchronously to the primary task; subtasks and their files are then + deleted asynchronously in the background. + + :param job_id: The ID of the Job from which to delete the Task. + :type job_id: str + :param task_id: The ID of the Task to delete. 
+ :type task_id: str + :param task_delete_options: Additional parameters for the operation + :type task_delete_options: ~azure.batch.models.TaskDeleteOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_delete_options is not None: + timeout = task_delete_options.timeout + client_request_id = None + if task_delete_options is not None: + client_request_id = task_delete_options.client_request_id + return_client_request_id = None + if task_delete_options is not None: + return_client_request_id = task_delete_options.return_client_request_id + ocp_date = None + if task_delete_options is not None: + ocp_date = task_delete_options.ocp_date + if_match = None + if task_delete_options is not None: + if_match = task_delete_options.if_match + if_none_match = None + if task_delete_options is not None: + if_none_match = task_delete_options.if_none_match + if_modified_since = None + if task_delete_options is not None: + if_modified_since = task_delete_options.if_modified_since + if_unmodified_since = None + if task_delete_options is not None: + if_unmodified_since = task_delete_options.if_unmodified_since + + # Construct URL + url = self.delete.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if 
timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + }) + return client_raw_response + 
delete.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def get( + self, job_id, task_id, task_get_options=None, custom_headers=None, raw=False, **operation_config): + """Gets information about the specified Task. + + For multi-instance Tasks, information such as affinityId, executionInfo + and nodeInfo refer to the primary Task. Use the list subtasks API to + retrieve information about subtasks. + + :param job_id: The ID of the Job that contains the Task. + :type job_id: str + :param task_id: The ID of the Task to get information about. + :type task_id: str + :param task_get_options: Additional parameters for the operation + :type task_get_options: ~azure.batch.models.TaskGetOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: CloudTask or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTask or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_get_options is not None: + select = task_get_options.select + expand = None + if task_get_options is not None: + expand = task_get_options.expand + timeout = None + if task_get_options is not None: + timeout = task_get_options.timeout + client_request_id = None + if task_get_options is not None: + client_request_id = task_get_options.client_request_id + return_client_request_id = None + if task_get_options is not None: + return_client_request_id = task_get_options.return_client_request_id + ocp_date = None + if task_get_options is not None: + ocp_date = task_get_options.ocp_date + if_match = None + if task_get_options is not None: + if_match = task_get_options.if_match + if_none_match = None + if task_get_options is not None: + if_none_match = task_get_options.if_none_match + if_modified_since = None + if task_get_options is not None: + 
if_modified_since = task_get_options.if_modified_since + if_unmodified_since = None + if task_get_options is not None: + if_unmodified_since = task_get_options.if_unmodified_since + + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if expand is not None: + query_parameters['$expand'] = self._serialize.query("expand", expand, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTask', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + get.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def update( + self, job_id, task_id, constraints=None, task_update_options=None, custom_headers=None, raw=False, **operation_config): + """Updates the properties of the specified Task. + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to update. + :type task_id: str + :param constraints: Constraints that apply to this Task. If omitted, + the Task is given the default constraints. For multi-instance Tasks, + updating the retention time applies only to the primary Task and not + subtasks. 
+ :type constraints: ~azure.batch.models.TaskConstraints + :param task_update_options: Additional parameters for the operation + :type task_update_options: ~azure.batch.models.TaskUpdateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_update_options is not None: + timeout = task_update_options.timeout + client_request_id = None + if task_update_options is not None: + client_request_id = task_update_options.client_request_id + return_client_request_id = None + if task_update_options is not None: + return_client_request_id = task_update_options.return_client_request_id + ocp_date = None + if task_update_options is not None: + ocp_date = task_update_options.ocp_date + if_match = None + if task_update_options is not None: + if_match = task_update_options.if_match + if_none_match = None + if task_update_options is not None: + if_none_match = task_update_options.if_none_match + if_modified_since = None + if task_update_options is not None: + if_modified_since = task_update_options.if_modified_since + if_unmodified_since = None + if task_update_options is not None: + if_unmodified_since = task_update_options.if_unmodified_since + task_update_parameter = models.TaskUpdateParameter(constraints=constraints) + + # Construct URL + url = self.update.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters 
= {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct body + body_content = self._serialize.body(task_update_parameter, 'TaskUpdateParameter') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, 
**operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + update.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}'} + + def list_subtasks( + self, job_id, task_id, task_list_subtasks_options=None, custom_headers=None, raw=False, **operation_config): + """Lists all of the subtasks that are associated with the specified + multi-instance Task. + + If the Task is not a multi-instance Task then this returns an empty + collection. + + :param job_id: The ID of the Job. + :type job_id: str + :param task_id: The ID of the Task. + :type task_id: str + :param task_list_subtasks_options: Additional parameters for the + operation + :type task_list_subtasks_options: + ~azure.batch.models.TaskListSubtasksOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: CloudTaskListSubtasksResult or ClientRawResponse if raw=true + :rtype: ~azure.batch.models.CloudTaskListSubtasksResult or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + select = None + if task_list_subtasks_options is not None: + select = task_list_subtasks_options.select + timeout = None + if task_list_subtasks_options is not None: + timeout = task_list_subtasks_options.timeout + client_request_id = None + if task_list_subtasks_options is not None: + client_request_id = task_list_subtasks_options.client_request_id + return_client_request_id = None + if task_list_subtasks_options is not None: + return_client_request_id = task_list_subtasks_options.return_client_request_id + ocp_date = None + if task_list_subtasks_options is not None: + ocp_date = task_list_subtasks_options.ocp_date + + # Construct URL + url = self.list_subtasks.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if select is not None: + query_parameters['$select'] = self._serialize.query("select", select, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", 
self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.BatchErrorException(self._deserialize, response) + + deserialized = None + header_dict = {} + + if response.status_code == 200: + deserialized = self._deserialize('CloudTaskListSubtasksResult', response) + header_dict = { + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + } + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + client_raw_response.add_headers(header_dict) + return client_raw_response + + return deserialized + list_subtasks.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/subtasksinfo'} + + def terminate( + self, job_id, task_id, task_terminate_options=None, custom_headers=None, raw=False, **operation_config): + """Terminates the specified Task. + + When the Task has been terminated, it moves to the completed state. For + multi-instance Tasks, the terminate Task operation applies + synchronously to the primary task; subtasks are then terminated + asynchronously in the background. + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to terminate. 
+ :type task_id: str + :param task_terminate_options: Additional parameters for the operation + :type task_terminate_options: ~azure.batch.models.TaskTerminateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_terminate_options is not None: + timeout = task_terminate_options.timeout + client_request_id = None + if task_terminate_options is not None: + client_request_id = task_terminate_options.client_request_id + return_client_request_id = None + if task_terminate_options is not None: + return_client_request_id = task_terminate_options.return_client_request_id + ocp_date = None + if task_terminate_options is not None: + ocp_date = task_terminate_options.ocp_date + if_match = None + if task_terminate_options is not None: + if_match = task_terminate_options.if_match + if_none_match = None + if task_terminate_options is not None: + if_none_match = task_terminate_options.if_none_match + if_modified_since = None + if task_terminate_options is not None: + if_modified_since = task_terminate_options.if_modified_since + if_unmodified_since = None + if task_terminate_options is not None: + if_unmodified_since = task_terminate_options.if_unmodified_since + + # Construct URL + url = self.terminate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = 
self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 
'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + terminate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/terminate'} + + def reactivate( + self, job_id, task_id, task_reactivate_options=None, custom_headers=None, raw=False, **operation_config): + """Reactivates a Task, allowing it to run again even if its retry count + has been exhausted. + + Reactivation makes a Task eligible to be retried again up to its + maximum retry count. The Task's state is changed to active. As the Task + is no longer in the completed state, any previous exit code or failure + information is no longer available after reactivation. Each time a Task + is reactivated, its retry count is reset to 0. Reactivation will fail + for Tasks that are not completed or that previously completed + successfully (with an exit code of 0). Additionally, it will fail if + the Job has completed (or is terminating or deleting). + + :param job_id: The ID of the Job containing the Task. + :type job_id: str + :param task_id: The ID of the Task to reactivate. + :type task_id: str + :param task_reactivate_options: Additional parameters for the + operation + :type task_reactivate_options: + ~azure.batch.models.TaskReactivateOptions + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`BatchErrorException` + """ + timeout = None + if task_reactivate_options is not None: + timeout = task_reactivate_options.timeout + client_request_id = None + if task_reactivate_options is not None: + client_request_id = task_reactivate_options.client_request_id + return_client_request_id = None + if task_reactivate_options is not None: + return_client_request_id = task_reactivate_options.return_client_request_id + ocp_date = None + if task_reactivate_options is not None: + ocp_date = task_reactivate_options.ocp_date + if_match = None + if task_reactivate_options is not None: + if_match = task_reactivate_options.if_match + if_none_match = None + if task_reactivate_options is not None: + if_none_match = task_reactivate_options.if_none_match + if_modified_since = None + if task_reactivate_options is not None: + if_modified_since = task_reactivate_options.if_modified_since + if_unmodified_since = None + if task_reactivate_options is not None: + if_unmodified_since = task_reactivate_options.if_unmodified_since + + # Construct URL + url = self.reactivate.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), + 'jobId': self._serialize.url("job_id", job_id, 'str'), + 'taskId': self._serialize.url("task_id", task_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['client-request-id'] = str(uuid.uuid1()) + if custom_headers: + 
header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + if client_request_id is not None: + header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') + if return_client_request_id is not None: + header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') + if ocp_date is not None: + header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [204]: + raise models.BatchErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + client_raw_response.add_headers({ + 'client-request-id': 'str', + 'request-id': 'str', + 'ETag': 'str', + 'Last-Modified': 'rfc-1123', + 'DataServiceId': 'str', + }) + return client_raw_response + reactivate.metadata = {'url': '/jobs/{jobId}/tasks/{taskId}/reactivate'} diff --git a/azext/generated/sdk/batch/v2019_08_01/version.py b/azext/generated/sdk/batch/v2019_08_01/version.py new file mode 100644 index 
00000000..73aa8eab --- /dev/null +++ b/azext/generated/sdk/batch/v2019_08_01/version.py @@ -0,0 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +VERSION = "2019-08-01.10.0" + diff --git a/batch-cli-extensions/azext_batch/commands.py b/batch-cli-extensions/azext_batch/commands.py index 4b19dc65..88c50913 100644 --- a/batch-cli-extensions/azext_batch/commands.py +++ b/batch-cli-extensions/azext_batch/commands.py @@ -13,9 +13,9 @@ logger = get_logger(__name__) -MINIMUM_UNSUPPORTED_BATCH_VERSION = "7.1" -MINIMUM_UNSUPPORTED_BMGMT_VERSION = "6.1" -MINIMUM_UNSUPPORTED_BATCH_EXT_VERSION = "6.1" +MINIMUM_UNSUPPORTED_BATCH_VERSION = "9.0" +MINIMUM_UNSUPPORTED_BMGMT_VERSION = "8.0" +MINIMUM_UNSUPPORTED_BATCH_EXT_VERSION = "8.0" def confirm_version(current, supported, package): diff --git a/batch-cli-extensions/azext_batch/custom.py b/batch-cli-extensions/azext_batch/custom.py index ad3dd109..513f3ea9 100644 --- a/batch-cli-extensions/azext_batch/custom.py +++ b/batch-cli-extensions/azext_batch/custom.py @@ -7,7 +7,6 @@ from knack.log import get_logger from knack.prompting import prompt -from azext.batch import _template_utils as templates from azext.batch.errors import CreateTasksErrorException from azext.batch.models import PoolAddParameter, JobAddParameter, JobConstraints from azext.batch.operations import ExtendedPoolOperations, ExtendedJobOperations @@ -26,9 +25,6 @@ def create_pool(client, template=None, parameters=None, json_file=None, id=None, certificate_references=None, application_package_references=None, metadata=None): # 
pylint: disable=too-many-branches, too-many-statements from azext.batch.errors import MissingParameterValue - from azext.batch.models import ( - PoolAddOptions, StartTask, ImageReference, - CloudServiceConfiguration, VirtualMachineConfiguration) if template or json_file: if template: json_obj = None @@ -36,23 +32,23 @@ def create_pool(client, template=None, parameters=None, json_file=None, id=None, template_obj = get_file_json(template) while json_obj is None: try: - json_obj = client.pool.expand_template(template_obj, parameters) + json_obj = client.pool_extensions.expand_template(template_obj, parameters) except MissingParameterValue as error: param_prompt = error.parameter_name param_prompt += " ({}): ".format(error.parameter_description) parameters[error.parameter_name] = prompt(param_prompt) except TypeError as error: raise ValueError(str(error)) - else: - json_obj = json_obj.get('properties', json_obj) else: json_obj = get_file_json(json_file) # validate the json file pool = ExtendedPoolOperations.poolparameter_from_json(json_obj) if pool is None: raise ValueError("JSON pool parameter is not in correct format.") - templates.validate_json_object(json_obj, pool) else: + from azext.batch.models import ( + PoolAddOptions, StartTask, ImageReference, + CloudServiceConfiguration, VirtualMachineConfiguration) if not id: raise ValueError('Please supply template, json_file, or id') @@ -120,7 +116,6 @@ def create_job(client, template=None, parameters=None, json_file=None, id=None, job_manager_task_id=None, job_manager_task_resource_files=None): # pylint: disable=too-many-branches, too-many-statements from azext.batch.errors import MissingParameterValue - from azext.batch.models import JobManagerTask, JobAddOptions, PoolInformation if template or json_file: if template: json_obj = None @@ -128,27 +123,20 @@ def create_job(client, template=None, parameters=None, json_file=None, id=None, template_obj = get_file_json(template) while json_obj is None: try: - json_obj = 
client.job.expand_template(template_obj, parameters) + json_obj = client.job_extensions.expand_template(template_obj, parameters) except MissingParameterValue as error: param_prompt = error.parameter_name param_prompt += " ({}): ".format(error.parameter_description) parameters[error.parameter_name] = prompt(param_prompt) except TypeError as error: raise ValueError(str(error)) - else: - json_obj = json_obj.get('properties', json_obj) else: json_obj = get_file_json(json_file) - # validate the json file - try: - job = ExtendedJobOperations.jobparameter_from_json(json_obj) - except NotImplementedError: - logger.error("The specified template API version is not supported by the current SDK extension") - raise + job = ExtendedJobOperations.jobparameter_from_json(json_obj) if job is None: raise ValueError("JSON job parameter is not in correct format.") - templates.validate_json_object(json_obj, job) else: + from azext.batch.models import JobManagerTask, JobAddOptions, PoolInformation if not id: raise ValueError('Please supply template, json_file, or id') @@ -172,11 +160,10 @@ def create_job(client, template=None, parameters=None, json_file=None, id=None, add_option = JobAddOptions() try: - client.job.add(job, add_option, threads=multiprocessing.cpu_count()//2) + client.job_extensions.add(job, add_option, threads=multiprocessing.cpu_count()//2) except CreateTasksErrorException as e: for error in e.failures: logger.warning(error.task_id + " failed to be added due to " + error.error.code) - create_job.__doc__ = JobAddParameter.__doc__ + "\n" + JobConstraints.__doc__ diff --git a/pylintrc b/pylintrc index 86387600..f92661cd 100644 --- a/pylintrc +++ b/pylintrc @@ -1,5 +1,6 @@ [MASTER] ignore-patterns=test_*,.*?_py3.py +ignore=_vendor reports=no [MESSAGES CONTROL] diff --git a/readme.python-multiclient.md b/readme.python-multiclient.md new file mode 100644 index 00000000..5338e90b --- /dev/null +++ b/readme.python-multiclient.md @@ -0,0 +1,87 @@ +## Python + +These settings 
apply only when `--python` is specified on the command line. +Please also specify `--python-sdks-folder=`. +Use `--python-mode=update` if you already have a setup.py and just want to update the code itself. + +``` yaml $(python) +python: + azure-arm: true + license-header: MICROSOFT_MIT_NO_VERSION + payload-flattening-threshold: 2 + package-name: azure-batch + clear-output-folder: true +``` + +### Python multi-api + +Generate all API versions currently shipped for this package + +```yaml $(python) && $(multiapi) +batch: + - tag: package-2018-03-01-only + - tag: package-2018-08-01-only + - tag: package-2018-12-01-only + - tag: package-2019-06-01-only + - tag: package-2019-08-01-only +``` + +### Tag: package-2018-03-01-only and python + +These settings apply only when `--tag=package-2018-03-01-only --python` is specified on the command line. +Please also specify `--python-sdks-folder=`. + +``` yaml $(tag) == 'package-2018-03-01-only' && $(python) +python: + namespace: azure.batch.v2018_03_01 + output-folder: $(python-sdks-folder)/sdk/batch/azure-batch/v2018_03_01 + input-file: $(rest-api-folder)/2018-03-01.6.1/BatchService.json +``` + +### Tag: package-2018-08-01-only and python + +These settings apply only when `--tag=package-2018-08-01-only --python` is specified on the command line. +Please also specify `--python-sdks-folder=`. + +``` yaml $(tag) == 'package-2018-08-01-only' && $(python) +python: + namespace: azure.batch.v2018_08_01 + output-folder: $(python-sdks-folder)/sdk/batch/azure-batch/v2018_08_01 + input-file: $(rest-api-folder)/2018-08-01.7.0/BatchService.json +``` + +### Tag: package-2018-12-01-only and python + +These settings apply only when `--tag=package-2018-12-01-only --python` is specified on the command line. +Please also specify `--python-sdks-folder=`. 
+ +``` yaml $(tag) == 'package-2018-12-01-only' && $(python) +python: + namespace: azure.batch.v2018_12_01 + output-folder: $(python-sdks-folder)/sdk/batch/azure-batch/v2018_12_01 + input-file: $(rest-api-folder)/2018-12-01.8.0/BatchService.json +``` + +### Tag: package-2019-06-01-only and python + +These settings apply only when `--tag=package-2019-06-01-only --python` is specified on the command line. +Please also specify `--python-sdks-folder=`. + +``` yaml $(tag) == 'package-2019-06-01-only' && $(python) +python: + namespace: azure.batch.v2019_06_01 + output-folder: $(python-sdks-folder)/sdk/batch/azure-batch/v2019_06_01 + input-file: $(rest-api-folder)/2019-06-01.9.0/BatchService.json +``` + +### Tag: package-2019-08-01-only and python + +These settings apply only when `--tag=package-2019-08-01-only --python` is specified on the command line. +Please also specify `--python-sdks-folder=`. + +``` yaml $(tag) == 'package-2019-08-01-only' && $(python) +python: + namespace: azure.batch.v2019_08_01 + output-folder: $(python-sdks-folder)/sdk/batch/azure-batch/v2019_08_01 + input-file: $(rest-api-folder)/2019-08-01.10.0/BatchService.json +``` diff --git a/samples/sdk/blender.py b/samples/sdk/blender.py index d1e86007..c6f32c18 100644 --- a/samples/sdk/blender.py +++ b/samples/sdk/blender.py @@ -47,7 +47,7 @@ # Load template file and parameters path_to_template = os.path.join(SAMPLE_DIR, "blender", "render.json") path_to_parameters = os.path.join(SAMPLE_DIR, "blender", "parameters.json") - job_json = client.job.expand_template(path_to_template, path_to_parameters) + job_json = client.job_extensions.expand_template(path_to_template, path_to_parameters) # Create job job = operations.ExtendedJobOperations.jobparameter_from_json(job_json) diff --git a/samples/sdk/blender_with_app_template.py b/samples/sdk/blender_with_app_template.py index 1186071e..dea79531 100644 --- a/samples/sdk/blender_with_app_template.py +++ b/samples/sdk/blender_with_app_template.py @@ -54,7 +54,7 
@@ with open(path_to_pool, 'r') as template: pool_json = json.load(template) pool_parameters = {'poolId': pool_id} - pool_json = client.pool.expand_template(pool_json, pool_parameters) + pool_json = client.pool_extensions.expand_template(pool_json, pool_parameters) pool = operations.ExtendedPoolOperations.poolparameter_from_json(pool_json) try: client.pool.add(pool) diff --git a/samples/sdk/ffmpeg.py b/samples/sdk/ffmpeg.py index 0367e2ab..31b5e670 100644 --- a/samples/sdk/ffmpeg.py +++ b/samples/sdk/ffmpeg.py @@ -52,7 +52,7 @@ ## Create pool from template pool_template = os.path.join(SAMPLE_DIR, 'ffmpeg', 'pool.json') - pool_json = client.pool.expand_template(pool_template) + pool_json = client.pool_extensions.expand_template(pool_template) pool_param = operations.ExtendedPoolOperations.poolparameter_from_json(pool_json) client.pool.add(pool_param) @@ -72,7 +72,7 @@ "value": pool_param.properties.id } } - job_def = client.job.expand_template(job_template, parameters) + job_def = client.job_extensions.expand_template(job_template, parameters) job_param = operations.ExtendedJobOperations.jobparameter_from_json(job_def) client.job.add(job_param) diff --git a/scripts/license/_common.py b/scripts/license/_common.py index d6eca574..5de63fb2 100644 --- a/scripts/license/_common.py +++ b/scripts/license/_common.py @@ -7,13 +7,25 @@ ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')) -PY_LICENSE_HEADER = \ +EXTENSION_SDK_LICENSE_HEADER = \ """# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- """ +PYTHON_SDK_LICENSE_HEADER = \ +"""# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +""" + env_folders = [ os.path.join(ROOT_DIR, 'env'), os.path.join(ROOT_DIR, 'package-verify-env'), @@ -21,7 +33,7 @@ ] def contains_header(text): - return PY_LICENSE_HEADER in text + return (EXTENSION_SDK_LICENSE_HEADER in text or PYTHON_SDK_LICENSE_HEADER in text) def has_shebang(text): return text.startswith('#!') diff --git a/tests/data/batch.job.resourcefile-legacy.json b/tests/data/batch.job.resourcefile-legacy.json index 01a34d3b..7bdb34ae 100644 --- a/tests/data/batch.job.resourcefile-legacy.json +++ b/tests/data/batch.job.resourcefile-legacy.json @@ -1,7 +1,7 @@ { "job": { "type": "Microsoft.Batch/batchAccounts/jobs", - "apiVersion": "2018-12-01", + "apiVersion": "2018-08-01.7.0", "properties": { "id": "job123", "onAllTasksComplete": "terminateJob", @@ -33,7 +33,7 @@ "commandLine": "/bin/bash -c 'cat {fileName}'", "resourceFiles": [ { - "httpUrl": "https://testacct.blob.core.windows.net/", + "blobSource": "https://testacct.blob.core.windows.net/", "filePath": "location" } ] diff --git a/tests/data/batch.pool.simple.resourcefile-legacy.json b/tests/data/batch.pool.simple.resourcefile-legacy.json index fd2b8c10..519f4fdb 100644 --- a/tests/data/batch.pool.simple.resourcefile-legacy.json +++ b/tests/data/batch.pool.simple.resourcefile-legacy.json @@ -1,36 +1,40 @@ { "pool": { - "id": "blobsource1", - "displayName": "Blender Ubuntu standard pool", - "vmSize": 
"Standard_D1_v2", - "virtualMachineConfiguration": { - "imageReference": { - "publisher": "Canonical", - "offer" : "UbuntuServer", - "sku": "16.04.0-LTS", - "version": "latest" + "type": "Microsoft.Batch/batchAccounts/pools", + "apiVersion": "2018-08-01", + "properties": { + "id": "blobsource2", + "displayName": "Blender Ubuntu standard pool", + "vmSize": "Standard_D1_v2", + "virtualMachineConfiguration": { + "imageReference": { + "publisher": "Canonical", + "offer": "UbuntuServer", + "sku": "16.04.0-LTS", + "version": "latest" + }, + "nodeAgentSKUId": "batch.node.ubuntu 16.04" }, - "nodeAgentSKUId": "batch.node.ubuntu 16.04" - }, - "targetDedicatedNodes": "1", - "targetLowPriorityNodes": "0", - "enableAutoScale": false, - "startTask": { - "commandLine": "sleep 1", - "waitForSuccess": true, - "maxTaskRetryCount": 0, - "userIdentity": { - "autoUser": { - "scope": "pool", - "elevationLevel": "admin" - } - }, - "resourceFiles": [ - { - "blobSource": "https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh", - "filePath": "setup-linux-pool.sh" - } - ] + "targetDedicatedNodes": "1", + "targetLowPriorityNodes": "0", + "enableAutoScale": false, + "startTask": { + "commandLine": "sleep 1", + "waitForSuccess": true, + "maxTaskRetryCount": 0, + "userIdentity": { + "autoUser": { + "scope": "pool", + "elevationLevel": "admin" + } + }, + "resourceFiles": [ + { + "blobSource": "https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh", + "filePath": "setup-linux-pool.sh" + } + ] + } } } } \ No newline at end of file diff --git a/tests/recordings/test_batch_extensions_live.yaml b/tests/recordings/test_batch_extensions_live.yaml deleted file mode 100644 index b227363d..00000000 --- a/tests/recordings/test_batch_extensions_live.yaml +++ /dev/null @@ -1,18911 +0,0 @@ -{ - "version": 1, - "interactions": [ - { - "request": { - "method": "GET", - "uri": 
"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "69f4c414-c371-11e9-ba9b-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:39:08 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:pXWwZGjW0/MuBWjndgsKTje23wxfE9TQolEFG4GDHWY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:08 GMT" - ] - }, - "body": { - "string": "\ufeff" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/supportedimages?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6a1c4ef0-c371-11e9-9fa5-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:WP1bPW3/llJrplS1jWd98aIJgVitv/mizr9fuEg/Qj0=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "e141e677-28f7-4d23-b862-46609acd0138" - ], - "Server": [ - 
"Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#supportedimages\",\"value\":[\r\n {\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-centos73\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-windows2016\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04.0-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"18.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 18.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 
8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"linux-data-science-vm\",\"sku\":\"linuxdsvm\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"standard-data-science-vm\",\"sku\":\"standard-data-science-vm\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-6\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container-rdma\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container-rdma\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"azureml\",\"sku\":\"runtime\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"dsvm-windows\",\"sku\":\"server-2016\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoft-dsvm\",\"offer\":\"linux-data-science-vm-ubuntu\",\"sku\":\"linuxdsvmubuntu\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n 
],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n 
\"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1803-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1809-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.0\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.2\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools?api-version=2019-08-01.10.0", - "body": "{\"id\": \"ncj-ubuntu1604\", \"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"canonical\", \"offer\": \"ubuntuserver\", \"sku\": \"16.04-lts\"}, \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"}, \"targetDedicatedNodes\": 1}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "6a476624-c371-11e9-a683-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "248" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:DKyBEpswRRv3z4JvQkK9rJwAGfeJ2nZ9yEvbHPBJK5U=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "ae0a2072-8478-41f3-9281-23c72c3046aa" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6a836c86-c371-11e9-b6a4-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:gwbzkWoEQL4KjRlWZy0bGiwPcRO5gPsK7RDlg0KzKjM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "d6bbca3d-edff-4cac-b2cc-1117359a8892" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6c56765a-c371-11e9-a076-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:12 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:HsHtu0noEtsf+H9AMbqZ9rhdA2ZdfduWeiG1sLYLECk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:12 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "9f1ef8a6-681b-420e-8fe6-efa0087c5f6b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "6e2a3a64-c371-11e9-a8f0-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:15 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:04NVwiSjN834FL2Pi17YFtgdY8AWEIU4D79jpNL314A=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:15 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "50a6969b-7d7c-468e-ab60-36bd2103ef7d" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6ffccfb6-c371-11e9-8e66-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:18 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:IQ4V/su9VwoeJVu9PEjB1LKhtRgXBVivo5qB5359LWQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:18 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "08d7c6a8-af15-4b69-941b-359c9cf26a1e" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "71cf76a4-c371-11e9-b531-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:21 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:qFgi4+BaOY7Crv/YqrpjP/rHviBjyZYMkjpuexVj0h0=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:21 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "26b8cdf3-f6b2-492f-a916-582e9cbfb1ad" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "73a0a066-c371-11e9-9cce-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:25 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:F1NG1Ttx3RRWNK3kD4uFirndy3TA061PoWsptK2jyt4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "28be9f29-41ef-4c76-af64-5d4e81f1b58f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7572307a-c371-11e9-98e1-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:dqDhH0zhCYtqpFAnGV6avNjbbieLNEaMXeJ+LP+zBmQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "fe57710d-fc75-4288-9e72-0433c64c905b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7744d77a-c371-11e9-ad1d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:31 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:lPK36U22cmvdmbHGAyw18O+lXAZsXuZeaeRm3EM+HGI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:31 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "20b1590c-3e41-46fd-90e2-02010d62e865" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "79183ae2-c371-11e9-a371-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:34 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:yvF1588cOs686ruw2Frp2hZe6TFcV3u2Zh27DAZFsJ8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:33 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "67878375-15a0-46f9-b62a-9f796c1e27da" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7aea4e2c-c371-11e9-b29b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:37 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:EqJhKiYtsOv0ahYOfSKgdH611VBn45UyYkcj6wVesck=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:37 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "a8e774a0-1cb8-405b-8cd0-6c92b161c35b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7cbda8ec-c371-11e9-89e9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:40 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:3woEiTsctogm5pLGhmcRf1hUCplsNxnTpuTlVtvewME=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:39 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "bbbf381b-5a23-449e-8e2f-19b576987977" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "7e91b5ac-c371-11e9-be98-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:43 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:yl+t9V8tczYabu1yejzgay56VSBwNZO4jNBS18RG9lo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:42 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "71eb7b07-b600-4b86-9f1d-4deae7fcd282" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "806402fa-c371-11e9-8e1d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:46 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:KgD8HVmlEPFXQ20c0YhzsYFJb7VT41m4CWC6aqidAq8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:45 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "c494789c-c5fb-4c77-afb3-46435646e75b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "823558a4-c371-11e9-b956-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:49 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:tKakqhaHp6+4oiK4zo4bNgALetFvIpGwOacU7d/booY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:49 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "04e1c128-4d0c-4255-9da0-e9c09bc89526" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "840725dc-c371-11e9-81e6-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:52 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:0cLpUfaoIx0mGlaIoMYUi6OBTkMwWG8X7lSHTQcyPJ8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:52 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "09d528f0-ac98-436f-b6f5-b1052e6fed6d" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "85d8d354-c371-11e9-ab8b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:55 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:GPPtmRYUL2K5iPBf01lyRZI1BcpfPZvhX0FqZvd2OMs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:39:55 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "94868be8-cae4-4f0e-bc00-1ac2ef00d4b4" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "87ae0c68-c371-11e9-8cbb-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:39:58 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:+AV3jMin2QM7857gYUEHzZoW5+v+E9lD0mogrIj1hb4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:39:58 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "51374efa-79cd-43bd-8827-4d333b2ee720" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "898098f6-c371-11e9-b57d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:01 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:jw91ur/OHkSoRNTBvQaT3fWdBKVcA+3+su9Hfq//ShQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:01 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "772dfeb5-eea1-438b-b917-dc621af48cc3" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "8b51f440-c371-11e9-b5d5-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:04 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:jJxFpvl6x5NiCsndQ04U9cSBcIyx3hm6uZy7v198o8E=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:04 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "0e05282a-7754-4eb7-b137-54dd56bcbaba" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "8d251b9c-c371-11e9-9a9c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:07 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ZjgpVQ86BRAp9WwEsZU8yO7EW5ArWrALvrn1hvcY8rg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:40:06 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "bc9590e8-9c15-4d5a-87c5-3baff66c7bda" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "8ef6e464-c371-11e9-af6d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:10 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:/dEsBQxi/Om3fd2t9nwBslsnOUHYK/t8sHR+qMPzzOQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:10 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "7419094a-aa16-489e-92aa-36c86b3e2ed5" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - 
} - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "90c8de68-c371-11e9-8b58-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:13 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:29iih/nrq2CFY490bMOnMWMxubgZlf2dMRshBaGkPbs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:13 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "f686e463-37a3-4e90-ab6f-6de57e64e59f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "929ccd2c-c371-11e9-a581-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:16 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:uyyC2lyKkdCfEdYJuvl+qIYkJYi1imssW72mYWT1h6M=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 
17:40:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "2482022e-332b-48c5-891b-cbf8710588f9" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"steady\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:16.1469849Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":1,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - 
"keep-alive" - ], - "client-request-id": [ - "92a44834-c371-11e9-ba58-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:17 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:a4AEkGOpd45ojqsfIPZFGGmTvAteV5OaBfPe3f8vymI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "05c650bd-4bf7-48ea-8d4b-a2c128667859" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:40:16.0922226Z\",\"allocationTime\":\"2019-08-20T17:40:15.2984459Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.16.171\",\"publicFQDN\":\"dns07c600ce-fa0e-4d8b-8d1f-948d1d69b8c7-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":22\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9475a2ae-c371-11e9-8dd2-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:KmtmNyWC9j5zNTZ113ABvQlg4E7KEuHbY+K9PQx9EAg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "d96567fb-a088-4460-a4e9-dc2b2e87dea8" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:40:16.0922226Z\",\"allocationTime\":\"2019-08-20T17:40:15.2984459Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.16.171\",\"publicFQDN\":\"dns07c600ce-fa0e-4d8b-8d1f-948d1d69b8c7-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":22\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9648e4ba-c371-11e9-9bf4-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:23 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5/8Qj4tu0VTvu2Hh9AcnMWoe/QnDLmnIuyYlOSOkUkQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:23 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "684a0487-829c-48da-b957-ae593c06357d" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"state\":\"idle\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:40:21.296399Z\",\"lastBootTime\":\"2019-08-20T17:40:21.194161Z\",\"allocationTime\":\"2019-08-20T17:40:15.2984459Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"certificateReferences\":[\r\n \r\n ],\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.16.171\",\"publicFQDN\":\"dns07c600ce-fa0e-4d8b-8d1f-948d1d69b8c7-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":22\r\n }\r\n ]\r\n },\"nodeAgentInfo\":{\r\n \"lastUpdateTime\":\"2019-08-20T17:40:21.194161Z\",\"version\":\"1.6.4\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"ncj-ubuntu1604\", \"poolInfo\": {\"poolId\": \"ncj-ubuntu1604\"}}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python 
batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SY
sfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "96aafb54-c371-11e9-b133-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "66" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D725957B414460" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "0b00045c-89d5-4083-8403-ce95fed68953" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"myTask\", \"commandLine\": \"/bin/bash -c \\\"echo test\\\"\", \"outputFiles\": [{\"filePattern\": \"$AZ_BATCH_TASK_DIR/*.txt\", \"destination\": {\"container\": {\"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}], \"constraints\": {\"retentionTime\": \"PT1H\"}}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python 
batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SY
sfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "970fc98a-c371-11e9-9e75-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "475" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3192f38a-2fca-4a50-aae5-8e6f63d89d69" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n \"status\":\"Success\",\"taskId\":\"myTask\",\"eTag\":\"0x8D725957BB9A33F\",\"lastModified\":\"2019-08-20T17:40:25.2332863Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9788badc-c371-11e9-a8f9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:25 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:3KTXlKFP3YAcXsdqP5TmifPeUTey8Y1cdLzFORFTfYM=" - ] - } - }, - 
"response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957B414460" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "7a6a6ffd-0c46-4bc7-8b2f-884173b7092f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604\",\"eTag\":\"0x8D725957B414460\",\"lastModified\":\"2019-08-20T17:40:24.4444256Z\",\"creationTime\":\"2019-08-20T17:40:24.4307198Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:24.4444256Z\",\"priority\":0,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":0\r\n },\"poolInfo\":{\r\n \"poolId\":\"ncj-ubuntu1604\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:40:24.4444256Z\",\"poolId\":\"ncj-ubuntu1604\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9791539a-c371-11e9-9189-44032c851686" - ], 
- "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:25 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:1Q8WdxtUeHtCsWjon6xIDaCjvDplImTwYK7Hu9hQd78=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "cb70c097-c58f-4ba5-971d-908574325587" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask\",\"eTag\":\"0x8D725957BB9A33F\",\"creationTime\":\"2019-08-20T17:40:25.2332863Z\",\"lastModified\":\"2019-08-20T17:40:25.2332863Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:25.2332863Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - 
"method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "99652236-c371-11e9-b2ed-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:f5M9JAiNFf7w2EN5SpgyyCQyoVty/f9fuOM7oIJIzgI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3c348f00-c2da-4885-bedc-960375b3943e" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask\",\"eTag\":\"0x8D725957BB9A33F\",\"creationTime\":\"2019-08-20T17:40:25.2332863Z\",\"lastModified\":\"2019-08-20T17:40:25.2332863Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:40:26.353947Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:40:25.735322Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n 
\"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:40:25.735322Z\",\"endTime\":\"2019-08-20T17:40:26.353947Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"poolId\":\"ncj-ubuntu1604\",\"nodeId\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"taskRootDirectory\":\"workitems/ncj-ubuntu1604/job-1/myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d/files/workitems/ncj-ubuntu1604/job-1/myTask\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "996e12a4-c371-11e9-9283-44032c851686" - 
], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:kjz4cnyJqmQyihVdFcYxvlJ6weYutUggc+GbppWvBZk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957BB9A33F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:25 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "5fbfe903-c7e6-4fd6-b494-7c6b581dbfbd" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks/@Element\",\"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask\",\"eTag\":\"0x8D725957BB9A33F\",\"creationTime\":\"2019-08-20T17:40:25.2332863Z\",\"lastModified\":\"2019-08-20T17:40:25.2332863Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:40:26.353947Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:40:25.735322Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n 
\"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:40:25.735322Z\",\"endTime\":\"2019-08-20T17:40:26.353947Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"poolId\":\"ncj-ubuntu1604\",\"nodeId\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"taskRootDirectory\":\"workitems/ncj-ubuntu1604/job-1/myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d/files/workitems/ncj-ubuntu1604/job-1/myTask\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "99764ab4-c371-11e9-abe7-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:OdeQVi6+xPhT8YGyuiEPEuDbSyHX7JmyZbN8wIPjoKw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:27 GMT" - ] - }, - "body": { - "string": "\ufefffileuploaderr.txtTue, 20 Aug 2019 17:40:26 
GMT0x8D725957C5D6B230application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C4DFF0C506application/octet-streamJep6Lf8xO8ufndlZJO8P8A==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C541AA10application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C5BE4415application/octet-stream2Oj8otwPiW/Xy0ywAxuiSQ==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9981707a-c371-11e9-bcf8-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5687sZHEC7H80334YTvh+1WSSh5p6g7tNjrfFvLnA8k=" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:40:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "2bd4fe3b-909c-4617-91c7-adc65b11c304" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer?restype=container&comp=list", - "body": null, - "headers": { - 
"User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "99897dfe-c371-11e9-80a1-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:X0g/G2R0yz7yZlccdKrWMZlebmbnM28c0x+eH/bQRLg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ] - }, - "body": { - "string": "\ufefffileuploaderr.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C5D6B230application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C4DFF0C506application/octet-streamJep6Lf8xO8ufndlZJO8P8A==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C541AA10application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:40:26 GMT0x8D725957C5BE4415application/octet-stream2Oj8otwPiW/Xy0ywAxuiSQ==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer/fileuploaderr.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "99941f9c-c371-11e9-8eb4-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:uouaMWiladXCm7Fm9hHRBgfgecPEKF/yUmgwQY3MLfE=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - 
"code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer/fileuploadout.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "999ae210-c371-11e9-813e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:/LsUUYhaMdhrREEgPAWpioJyQcHdFXQrAnnxx+hiUbM=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer/stderr.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "99a2c3a8-c371-11e9-9c6e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:3QYhqVl9rZO0TEZLAMuamNpL83PakIF/m9S0qy1GFDQ=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - 
"Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer/stdout.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "99a9f3f4-c371-11e9-a965-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:jQNFIhbu4L+M5OQEmeY3aW5mCvW/WfmRYK33Du50oY0=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/supportedimages?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "99b0d606-c371-11e9-a8db-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5Lbd2U04jG1knPmswGpVcJ75QCxFDrqTXWU2mAvcFcM=" - ] - } - }, - "response": { - 
"status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "1fdefa5d-9bb9-4e03-b255-5118ce132010" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#supportedimages\",\"value\":[\r\n {\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-centos73\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-windows2016\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04.0-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"18.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 18.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"linux-data-science-vm\",\"sku\":\"linuxdsvm\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"standard-data-science-vm\",\"sku\":\"standard-data-science-vm\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container-rdma\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container-rdma\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"azureml\",\"sku\":\"runtime\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"dsvm-windows\",\"sku\":\"server-2016\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"linux-data-science-vm-ubuntu\",\"sku\":\"linuxdsvmubuntu\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1803-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1809-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.0\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.2\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools?api-version=2019-08-01.10.0", - "body": "{\"id\": \"ncj-windows-2012-r2\", \"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"microsoftwindowsserver\", \"offer\": \"windowsserver\", \"sku\": \"2012-r2-datacenter\"}, \"nodeAgentSKUId\": \"batch.node.windows amd64\"}, \"targetDedicatedNodes\": 1}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], 
- "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "99ba509e-c371-11e9-a532-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "277" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:F2ymTkGpnEg0xP9IR60qQmr3OHGlhTiSq0Hnp3bsT9U=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "d9ad6909-02b7-45e0-a10e-d638b2cbae0c" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "99ccccb4-c371-11e9-a38a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:R00scbBql3EXY3jDxoF3lh6htFokFsduKojkwub7P/4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - 
"headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:28 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "c0cc9e32-83d6-451e-a0e6-8eab51181ea9" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 
Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9b9e8422-c371-11e9-9593-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:32 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:gA6yzFz+4vzX3/TIb/ejzicawcAL2hbI9G6BhiyjCQE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:32 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "046d06a5-d922-4e6d-8c21-46072582c220" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n 
\"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9d71e2b6-c371-11e9-9baa-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:35 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:7ziKU0DiAti+aHDYLd0p+cEWe4GZ3wrEvvH73YyDzOk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:35 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "4f4220ac-ad45-438c-a420-517dc2f9ba1c" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "9f44241e-c371-11e9-909f-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:38 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:XCV60+Ux6Oo7VWaESbxLAw6mZPbjDwki3LrJd9BQrVw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:40:38 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "17d9f944-6b96-437f-bc60-8fb7444ae06b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "a11605b0-c371-11e9-9927-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:41 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:SORMvNF/GI3yMNUU+gOjpwpEh531Ad+dDj6+peJCpCw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:40 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "bd1c5c9d-167d-40c7-af1e-67c298dde726" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "a2e895f6-c371-11e9-87f9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:44 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:SKcM0NMj/S082YpsWV+zwMIOUw34ff/ytLXPS5XCwu4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:43 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "186223c8-11b2-4ff8-a83b-81b46eab7984" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "a4badde8-c371-11e9-b572-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:47 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:s3mqw1QmcaimPPz3n6jzL2LqRZpVeNsgbDarKokmrJs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:40:46 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "0282ad5c-454e-407f-937b-f8b99f3b1b0b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "a68d5130-c371-11e9-9a2c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:50 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:2zqiLk5lBq1WJkCSIqGtnWA6+xtGMqSUgIIp5+fXmBI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:49 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "53a9a57b-cb96-4c96-bd40-544ad12caf6a" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "a85f19ac-c371-11e9-8914-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:53 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:haRpSaUADnkH2ZE8hT9Kh50FNvdiItTI/0jYfJ4m4J8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "41e77d90-86fb-4ce1-842b-8c173c2145b3" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "aa31141e-c371-11e9-ae6a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:56 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:uhFAl0PA2H7QUMGnOC6fshsKusxLv28QtuwV60CJn6w=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:40:55 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "3ca06436-ec0d-4ccf-aca9-d63e9b4321e1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "ac033188-c371-11e9-abe8-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:40:59 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:OvCMjgcpdtrevjpNm2wjfboeoKyTNY9wY94av3hTBJ0=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:40:58 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "fa79a053-365c-4cf0-9b3f-b807135b400a" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "add5aacc-c371-11e9-9996-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:02 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:RWLCsz78rGj2S6B+WoPXM1ANQ2PugH2hDiMMTW2bzYU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:01 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "350223fe-105c-4b54-b1d8-3263b4d7ab1f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "afa848b0-c371-11e9-9df4-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:05 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:8b5fQGpGdv+TwR/FMpfCxHupug9LV2Ky4Dz9N4ssp8Y=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:41:05 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "31977adb-2654-4d25-be0d-caeab0036d85" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "b17aeefa-c371-11e9-8729-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:z93KVdbtdc8lLVmy07XPrrfRcapXplsAzckOVwWo8fU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "7c012e5f-4403-4200-93ec-666197061a49" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "b34cc8b0-c371-11e9-9988-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:11 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:WmBXsObj794YpGObTmNVSfPqQ8x/YwQ3dbHxbqvswgs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:11 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "611ad7ee-232f-49b8-98a2-5a652149f5b9" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "b51fb57a-c371-11e9-b5df-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:14 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:1R5+vgztVsSDSvk7v68O64407eB4wqwaQd70+2gjey0=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:41:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "6de937d7-3197-464b-8f41-d99e0602d363" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "b6f5dbd8-c371-11e9-9ce9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:17 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:Iz2lo03+ENzJP5UG9/g1jRfjY/lWrKBePa/mlCaGfG4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:17 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "243526a3-292b-4bdb-9494-5dae867f7625" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "b8c790d4-c371-11e9-810b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:21 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:JBfCCTq7ByHvfQ/R/D8kz5l1CmOKMq0btOzw1SFntKU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "2dcae9dc-7872-4ebe-aa7b-84d2be0c1561" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "ba9cfcf4-c371-11e9-9112-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:24 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:8n9E9x4oXO2p4gLFbTS0dAsaW8dMCPZGJ5TieVHlBgY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:41:24 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "1cf02d2e-5a03-4848-98a1-530a95c9dad8" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "bc70f946-c371-11e9-8b40-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:27 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:7Zbbp0teBjb0tpn5+UZ++UhAkSV7e2Ua5+P7PI+GRM8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "816c02da-cd4b-4548-97d0-df8209ea7112" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "be4620ca-c371-11e9-a342-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:30 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:vNBjYK+BfJgQynp0+N8rkhD5uFpnlQ+4t7WhSxy5Grs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:30 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "6eb5e6fa-2893-44dd-bf0b-ee8de06b1873" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c0193368-c371-11e9-be82-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:33 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:gEsfvJbcFeWb89NAhIFJ1X4I9eW1SBENctg+t70Ye0k=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:41:32 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "e45fab0c-8bff-418d-a697-42157072a3fc" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c1ed4f62-c371-11e9-8e3d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:36 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:0cJQ1tfCuB2w5oNHZv5t7KJbhsr0/ZwGpNAnEWhTcRk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:36 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "b4c0dfbd-d542-4d79-89d9-803f64a513bc" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c3bfe57a-c371-11e9-b983-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:39 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:e/pNhD4XDiulkSl6qTWw6rgh1mOTWzpZcFPAPR5XQCA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:39 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "82577bc2-8abe-4d7b-b2bb-3e049cab69b1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c5919390-c371-11e9-8d44-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:42 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:eM2sMOfGcdYtJA7EThdh1/31ZMh1Gt4OW45TnHH66pU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], 
- "Date": [ - "Tue, 20 Aug 2019 17:41:42 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "3f67631a-11f5-4171-8a0b-1a4f26048467" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" 
- ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c768463e-c371-11e9-9cfb-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:45 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:oVfK3RBatFluF1vNt2IwNLyqnuIDtPYs4lAyQBdOqw4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:45 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "37d8d050-7610-4a30-b790-9c4927f5489f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"steady\",\"allocationStateTransitionTime\":\"2019-08-20T17:41:45.3536086Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":1,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c76f61c2-c371-11e9-b91f-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:45 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:LC0+Dq9RuXAI0CY9GuFbKa0YabG6WD7mVs04Pjwc1xI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:45 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "40da48b2-bf61-46fa-8dd1-6728fa331767" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "c9422358-c371-11e9-a019-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:48 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:CdBCg3Yjq02xBZeBuTMVoAPvZNTzVyREmRfVdY+Iyt8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:48 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "931d13ee-227b-40e3-be69-99a7cd69b345" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "cb15e3a6-c371-11e9-a2f9-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:51 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:6AEffiDAb2MKwMfr5qgqB8tDjXt6y0B6Ozsi7ZEuMfc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:51 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "b9e3cf22-37a5-41be-b4bb-f9a0595f9808" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "cce855fe-c371-11e9-a609-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:54 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ZTmM0rwEtOjhIDjPT5x2AUeAEnciG/eyRgMzXHIV1Sw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:54 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "dae9c1a8-baac-4cef-af2a-2c8e13124f71" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "cebbae40-c371-11e9-9eae-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:41:57 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:+6RnyXkoqecDvSIBc5j0JOp/l8ZdyaRnsVY3gsTX4F8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:41:57 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "16ed8b4b-d7bb-4244-980c-7fa935bdd260" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d08eb23a-c371-11e9-bfe7-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:00 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:hgI5TueDMWPo3wnpTU6DsEHYAmV0+lqfRYySYn6j3jc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:00 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "cc5e8a18-34b0-4f76-8e3c-e88c4e0b2187" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d260f88a-c371-11e9-a64d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:03 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:fy9kcRWAiCXjZBlcSLfwX0DiKq6YBYYnAC8Geh82eKo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:04 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3f58417b-80a2-49ce-9f55-d2afbcbfc791" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d43535be-c371-11e9-ab4a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:07 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:wzATs8D3P/s3GF8d8ETf6vm3R6Pobt0JJ8xqATNHAQs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:06 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "8f008fbb-1105-4863-a50d-2130b3a639be" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d607ebe4-c371-11e9-b8d7-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:10 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:zmqG5SVF/72FVIdjAN5hPPvzwqsnziv0KOgMtGo3wrA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:10 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "c171ef60-7d9a-45b9-8605-15bfe87b52e1" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d7dbaf38-c371-11e9-8fac-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:13 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:b631qD+KlnzNeRly0Qqw3RB7kSw+zr3vje/R5DqaPKY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:13 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "177a9bc0-cf1b-48e2-b3e2-7885ccc5e8d7" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "d9b033f4-c371-11e9-bbfa-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:16 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:QHfDCJdqEkwftyj3x+yVr7ZmzfcDCS72pPTROYrYclo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:16 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "344ef1d0-fb3f-4f2f-89ae-6e4b93f38f63" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "db8400be-c371-11e9-940f-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:19 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:czuUt7wZd9K4zZsGgesxjhPzm84pIN2920tMcSYhTE4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:18 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "e7729d35-44dd-40ce-b896-34e474cd0062" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "dd57d13e-c371-11e9-944a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:22 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:tJGY6VJxAXbxLNrfxVcPLj5lQK56aUep72p1dAQvxaQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:22 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3a7db32f-38ed-4e29-a7c8-faa2cf38f1d3" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "df2a3dac-c371-11e9-8874-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:25 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:n6DYbf3giEHgKp15R64ktuO3zqOPMGr6Apk/dMYDVs8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:24 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "102f2791-7f22-4435-97b3-3c36cf69b952" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "e0fd0b70-c371-11e9-888d-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5muWSggKs0Fic7NUjHA8AHXwkEvSJkzpOtU+ZtZTNkA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "527db8df-4cc9-4e79-b862-64a9e7b0715a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "e2d11080-c371-11e9-ab3d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:31 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:TRWXJy3jd0hefXfPMlF7GzvbyDLk5N5mrm4KpSMOjpw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:30 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "52dfe9d3-2042-4def-8d4f-a5efe27a525b" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "e4a48f0a-c371-11e9-aee1-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:34 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:2TcBiFnrIQWV3BCk6wzZlzL14ZM5BYOt9/hl+IfyihA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:34 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "1379cd55-e2d2-4b95-b1f7-a8d2937ba403" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "e677ae08-c371-11e9-9b32-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:37 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:cFlPgsCv6Z1zk9uR3EGx54hWYAfUIAVeyZlDv9U4vLY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:37 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "e2db0c84-967b-439c-a4a9-bd7112c6b6fb" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "e84a1cae-c371-11e9-8048-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:40 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:eUMDdOBJ701z5jDsRl3OKTr9Lx5ABFCWExnMTAt7up8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:40 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "ccc04524-db84-406d-972e-9e99228aaa59" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "ea1c46ee-c371-11e9-8268-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:43 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:we7/a3isIFKApsLnCbIllEWmjZ2gRksnq9zfDtwkJVY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:43 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0738a939-3ca9-4e42-8d81-f50dca417b49" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "ebef1988-c371-11e9-a55b-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:46 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:HkIIPzsNmyEXS/w2ne6HQjn6Xb6m/nSuAn92GAVw2cY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:46 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "c8c56492-3800-43a1-b2b7-3e502dad8a8c" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "edc15dc6-c371-11e9-b809-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:49 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:Hfdn/yPCwk/uhZOKAlM53CzH2r2hDidUBp66SucCXhE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:49 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "c341a0d6-c654-4eb1-9f53-04036bfe66b8" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "ef945d98-c371-11e9-b2b6-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:52 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:1JM/HfLaOGq4cvkqgQRkBm7NIX0ksrZHDz3PivPW0lo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:53 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "59fcf763-f82b-4d83-aaee-ac5ad6e3cf4c" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "f1672450-c371-11e9-b063-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:56 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:cq125RybWWMRJfNLUcMX46KtwB93My8nxem4rvH0Wgw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:56 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "1e06e582-caa1-4ee5-a815-9498ae7fc446" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "f33a9df6-c371-11e9-b0b3-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:42:59 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:OJ1VOG8/H+J9MXg4DHluInpY6WeRygZa9tBHFyQolA8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:42:59 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "f0bae67d-de0c-4ec6-8381-3bd693c5aeae" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "f5117d22-c371-11e9-bf41-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:02 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:KfV0Ryl3d77YVYXjyS6tqiQkMHXln0UZWsYJ79z6NJE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:01 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "165a8916-dc36-4988-86d9-c33274820fb8" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "f6e49c36-c371-11e9-9e67-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:05 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:w4+G0iajCf8AM2Y6eDL0KQ3FTpjQbAuytH+xfr2WOmU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:05 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "eff0dc42-fc78-41ea-8414-fd5a01d7baed" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "f8b7cc00-c371-11e9-970b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:deswNwPf5Iqwtnkmu4DPpjsVD3MYy9eEysIDYNGUSMY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:07 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "ee0a7c77-b789-480b-b01e-01ccb469c48f" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "fa8a304a-c371-11e9-8fdb-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:11 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:A5gcpOL45VaxZo0kTYTwzPyPsnUfQiSEd/X1/8lIPtE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:10 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "2b44011f-5547-4eb8-a88c-17bb67c4d72a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "fc5d991c-c371-11e9-940e-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:14 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:LDlEHWCoIUL7cV16//NzDzUadkoPhV9suleSPV5Qo2E=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:13 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0a083b2f-c4ea-4857-b63e-3f0fc1d2b878" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "fe3075b4-c371-11e9-8c68-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:17 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:MiV6TED3WJsA1bSI4ILvaZ6b5CURXbFIUYrlTeYzz2I=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0ede8f38-087c-4f09-bfd0-d22a9226c3d2" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "0002f8ec-c372-11e9-9bc6-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:A0IRJdHRCA9PlG5r+hjl1BWEjxrudg1SPLlERpTf3po=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:20 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "100b2c6b-7136-4b53-aade-ee5f621a6a90" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "01d654fa-c372-11e9-b7f1-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:23 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:6ZcO/H5QTo0CKUOphAGD1KA05AE2B6Px8QBbT/uTp3A=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:22 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "ed4ae2d1-93d2-451d-af62-4de9837a532e" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "03a92858-c372-11e9-ad53-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:26 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:anHrO7CpfrRhBS59HfD89cGFFRDgMegNTmhTmVkfI68=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:25 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "647f2d86-17cc-4e84-8052-c23938111b67" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "057bde12-c372-11e9-a7bf-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:29 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:43Tx8vGzumrFvGvjMm+uy9KAEGVd3uij1n7FhoWRsYs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:29 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "95c63292-9a3e-4ceb-804f-2999166b7f90" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "074e515e-c372-11e9-bbf4-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:32 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:2swh41u8OCgVnZDv0Gj6O8pNEpl0rr7BZVzdxef4GaU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:32 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "27e2d0b4-acf1-4058-bd34-827961039d79" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "092118f6-c372-11e9-bc3b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:35 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:3/5JG2BYPUfPhILvOv7uRm+jNFGesppGVi5pa2REH8g=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:35 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "b41df8cc-2aa7-4239-8f19-432b08418d3b" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "0af451ee-c372-11e9-af1e-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:38 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:aWO1kxKH8KpJx05kbnptjFm0dzRggkVX387YOOICJsc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:38 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "956ea9cc-439b-4941-bb13-36b760afad06" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "0cc69070-c372-11e9-9710-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:41 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:Lkr1pQLJEVtiPEmdcsX0q8gK0qS9oUw6Vt2O7GjPWeo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:41 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "2bf64980-c5ad-4985-95b6-e44c5ffac385" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "0e9916c0-c372-11e9-a288-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:45 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5t9mY1x+JCtteK2lWfBQEx+6uDLVwuQxp0a1SQsKjY8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:44 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "185daa7e-e4ff-439e-b1a5-0f3719673f46" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "106db0cc-c372-11e9-a6d7-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:48 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:kZ3/kI3PK+0B8N3EhkbbMI/0lWQz0LUlGIp8JKL69hA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:48 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "59355773-648e-4506-bc30-5cef790f6289" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "1240f50c-c372-11e9-9d81-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:51 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:9XudOJxRe/5xKato7GWEhlN25a8IXxJFucvBDgfJliA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:50 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0cabbac9-aa98-4ffc-9a39-4c81d3b4fdf5" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "141331fa-c372-11e9-9fbd-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:54 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:TghPHMFKOq2juoGk+h4Ppl3LwZ2b4yYtEocD7J4LDnE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0aded115-b4d6-4501-86d6-c883b783a4ff" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "15e69224-c372-11e9-9db9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:43:57 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:zSRaxLFDrdwST//Qk9efueZBuz3rR7IGRQWhsGy7+jc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:43:56 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "7e1ea6dc-15a2-4454-890a-b7fd25cb792c" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "17bdfecc-c372-11e9-b290-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:00 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:s6ZRmzJD7IMeZRsmuU6OG2Xr+orU0B/dtMFoHi0Un50=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:00 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "de879a37-4872-4470-9588-a82935ccddf1" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "199081c6-c372-11e9-9f92-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:03 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:p84B27l9Y254IxDSWLlQ1H6ITDvjvM/uHCkx34FM0Hk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:03 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "c6b1096b-b29b-44ca-8d59-7445640165ad" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "1b63f190-c372-11e9-bc6c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:06 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:76qFMzDtMbnjeLaFnNq/9aHs615SjdFn9CUANtSPfpA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:05 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "6c0dc042-2079-487a-8701-462d1d80dc1f" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "1d35e510-c372-11e9-b346-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:09 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:UkG9ARtwQmP3qx9+lGNnfNXuCOVh/UIAYzycPnCYeSo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:09 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0bd89020-51f2-410c-871b-a6b90012c000" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "1f080276-c372-11e9-a864-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:12 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:pMXET6AZ26rD7hR6SYbzzUD/VIY8PkiyOSmKojdKfDA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:11 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "175ec19a-31df-4ff5-8c0e-c9128fe1e2da" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "20db7c8c-c372-11e9-a64d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:15 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:BdUHaC1rvH8Grk8Dy/yePosRiM3Oy9tTtHtsya02QM4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:15 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "e87104e8-42fd-437a-bcc4-b0b1e339368d" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "22af0a52-c372-11e9-9b9c-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:18 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:e3ycw+PeF3KgtdFitwGde3LFK+7eiAO8tUlKTJNvxaE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:18 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "0d3fd65b-1c07-47f6-95c4-efb48b2bcb1b" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "2481aa1c-c372-11e9-9d21-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:21 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:IcQDc1P9G1Q5kXdiilOyEfuvTTO4i8aH58WljRw98qU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:21 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "9f18a63c-33de-40b9-9708-fa4fb04ac92c" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "26542408-c372-11e9-8794-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:24 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ovPEZPTLdNSV4zgo82RzUPdBlr31+RSCApNDQerzB7s=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:24 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "98577927-d844-46c2-9b8b-6855ee1cafe3" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "28267dd4-c372-11e9-9f41-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:27 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ED8tzvugMXqaK9XURD1uFMXtV4U9sgHLBtUh32z1Dvc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "9363001d-6450-4d30-929e-538cedf7ce3a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "29f8974c-c372-11e9-af48-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:30 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:0k5sxVHU+GypTf04ZBW9qSx5098jDY9Xi/Rw6yzjt1k=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:30 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "5a0145fc-2a95-458e-8841-d2ca5a239e88" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "2bcb65d8-c372-11e9-b273-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:33 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:6J7JmPDTeHUCTVu7JJPCulYAuudpJFv2tEXpU1MICXE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:33 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "22f54385-26f6-40b3-aa2d-7c7d04d13ab3" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "2d9edc6e-c372-11e9-894b-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:37 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:hsFSDU/94pWmsN6Keg2CKpQirP2laesTYB6xzPX8yTE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:37 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "7ba7b751-fe55-411f-8cbc-5e99b648a5e8" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "2f718f36-c372-11e9-818f-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:40 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:D0olf8xEQUQGNjZy1ND64MVgHogC/a44N9ahKfYWUos=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:40 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "49fcd5e7-5f25-4195-ac9e-c8e2686bf616" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "314494e6-c372-11e9-97b7-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:43 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:w35GmR8i0d25XonEMb5qzQXKgc7YCp6p7BSIOfw32n4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:42 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "request-id": [ - "cd6fe55f-c903-40ea-8611-b119d4de239d" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "3316fba2-c372-11e9-9735-44032c851686" - ], - 
"accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:46 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:qv7VwP4MkjXCyv6NXCOf4xbRRQYSBRrNlx9rJ5FrWKA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:45 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "82aa06ab-98df-4869-85ee-dac2a5298275" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"starting\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:41:45.3034248Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "34ea90d4-c372-11e9-a91e-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:49 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:1HgGOzTvkHP8UP1TWcx8R0jL6xK3zIPmhV3TgI9Q0Sk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:49 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "ed7fb884-05be-43cb-bb46-1ac1b6756194" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n 
\"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"idle\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:44:48.0148Z\",\"lastBootTime\":\"2019-08-20T17:44:47.433702Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":0,\"totalTasksSucceeded\":0,\"runningTasksCount\":0,\"certificateReferences\":[\r\n \r\n ],\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n },\"nodeAgentInfo\":{\r\n \"lastUpdateTime\":\"2019-08-20T17:44:47.433702Z\",\"version\":\"1.6.4\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"ncj-windows-2012-r2\", \"poolInfo\": {\"poolId\": \"ncj-windows-2012-r2\"}}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "3518a5a6-c372-11e9-b6fb-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "76" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:49 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D7259619F1E38A" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "6f413b44-c0e2-41ba-8426-851dcaf3d109" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"myTask\", \"commandLine\": \"cmd /c echo | set /p dummy=test\", \"outputFiles\": [{\"filePattern\": \"$AZ_BATCH_TASK_DIR/*.txt\", \"destination\": {\"container\": {\"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}], \"constraints\": {\"retentionTime\": \"PT1H\"}}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - 
"Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; 
charset=utf-8" - ], - "client-request-id": [ - "35c02a80-c372-11e9-8b22-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "480" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "f4364c5e-0770-4b56-977b-fbf898085125" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n \"status\":\"Success\",\"taskId\":\"myTask\",\"eTag\":\"0x8D725961A19321F\",\"lastModified\":\"2019-08-20T17:44:50.9395487Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "35f88f76-c372-11e9-8423-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:51 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:c3SjP2Z5TDuoAdLqkK4VgaV/UEbwHoTfp8M+/KtfBuQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ 
- "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D7259619F1E38A" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "cdc71dc7-4c99-4967-9c34-961ced37c9bd" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2\",\"eTag\":\"0x8D7259619F1E38A\",\"lastModified\":\"2019-08-20T17:44:50.6819466Z\",\"creationTime\":\"2019-08-20T17:44:50.6418336Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:44:50.6819466Z\",\"priority\":0,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":0\r\n },\"poolInfo\":{\r\n \"poolId\":\"ncj-windows-2012-r2\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:44:50.6819466Z\",\"poolId\":\"ncj-windows-2012-r2\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "3601b72e-c372-11e9-bd4a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:51 GMT" - ], - "Authorization": [ - "SharedKey 
sdktest2:09u9b/SvpMgiDeZ82VI2GtFwOVJ1mDtYF3+Tjtr1OWk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "14bc663a-faaa-4f17-a258-feeaf6ee0d65" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask\",\"eTag\":\"0x8D725961A19321F\",\"creationTime\":\"2019-08-20T17:44:50.9395487Z\",\"lastModified\":\"2019-08-20T17:44:50.9395487Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:44:50.9395487Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "37d61308-c372-11e9-be6c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:54 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:U+uIjZHLFPXiaQX+7zZ8qLhKX4I1mgueAHi7HVdX0UA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:54 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "a2446cd3-6853-4a16-8c22-2c94a2754ed0" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask\",\"eTag\":\"0x8D725961A19321F\",\"creationTime\":\"2019-08-20T17:44:50.9395487Z\",\"lastModified\":\"2019-08-20T17:44:50.9395487Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:44:53.967174Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:44:53.042985Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n 
\"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:44:53.042985Z\",\"endTime\":\"2019-08-20T17:44:53.967174Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"poolId\":\"ncj-windows-2012-r2\",\"nodeId\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"taskRootDirectory\":\"workitems\\\\ncj-windows-2012-r2\\\\job-1\\\\myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d/files/workitems/ncj-windows-2012-r2/job-1/myTask\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - 
"37dfa68c-c372-11e9-be46-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:44:54 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:/lu9QFjAcSU+lFPy0P3jVesFnmo9JrkhN3MnZO2agpo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:44:54 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725961A19321F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:44:50 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "c81c4b07-4d0d-4704-9e34-c7f0300fe57e" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks/@Element\",\"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask\",\"eTag\":\"0x8D725961A19321F\",\"creationTime\":\"2019-08-20T17:44:50.9395487Z\",\"lastModified\":\"2019-08-20T17:44:50.9395487Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:44:53.967174Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:44:53.042985Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/aaatestcontainer?st=2019-08-20T17%3A39%3A08Z&se=2019-08-21T17%3A39%3A08Z&sp=rw&sv=2017-07-29&sr=c&sig=6MqdvD/tVrcMqBh/xoltuDEmtGg79s3sE1V3lcoFaoM%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n 
},\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:44:53.042985Z\",\"endTime\":\"2019-08-20T17:44:53.967174Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"poolId\":\"ncj-windows-2012-r2\",\"nodeId\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"taskRootDirectory\":\"workitems\\\\ncj-windows-2012-r2\\\\job-1\\\\myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d/files/workitems/ncj-windows-2012-r2/job-1/myTask\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/aaatestcontainer?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "37e93302-c372-11e9-9ff3-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:07 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:vFjgezCgXYwB9gBneM83XvFTiAcR6wfhkLB4D3acIwA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:07 GMT" - ] - }, - "body": { - "string": 
"\ufefffileuploaderr.txtTue, 20 Aug 2019 17:44:53 GMT0x8D725961BCE810E0application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:44:53 GMT0x8D725961BDE1438519application/octet-streamaWSSlhRJYQiJLliho4L66A==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:44:53 GMT0x8D725961BDFC2360application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:44:53 GMT0x8D725961BE122054application/octet-streamCY9rzUYh03PK3k6DJie09g==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "401cc3c2-c372-11e9-9d78-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:4uT8P6RATBgsBNFtHK4NIV0Orr1jLHjxnPcoa6IcoOg=" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3b3de2bd-485d-40a1-ac3b-6dc3fb7ff8ab" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "40264dda-c372-11e9-94f7-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:3ne9gnvYTa8u9jFGyrPNH5iRKISz4XROeO8l5K7s4IY=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified container does not exist." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:07 GMT" - ], - "Content-Length": [ - "225" - ] - }, - "body": { - "string": "\ufeffContainerNotFoundThe specified container does not exist.\nRequestId:7774310e-c01e-001c-0a7f-57bc17000000\nTime:2019-08-20T17:45:08.1858842Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/supportedimages?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "402ed95c-c372-11e9-ac69-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:4jCSKGtc1/VzTf9bgeYU6HQCNLJ+lD/ufb+VVegs0jE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - 
], - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "3e5c493b-085c-4945-8e5a-bc7ad1573f9a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#supportedimages\",\"value\":[\r\n {\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-centos73\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-windows2016\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04.0-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"18.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 18.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n 
\"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"linux-data-science-vm\",\"sku\":\"linuxdsvm\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"standard-data-science-vm\",\"sku\":\"standard-data-science-vm\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n 
\"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container-rdma\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container-rdma\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"azureml\",\"sku\":\"runtime\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoft-dsvm\",\"offer\":\"dsvm-windows\",\"sku\":\"server-2016\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"linux-data-science-vm-ubuntu\",\"sku\":\"linuxdsvmubuntu\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1803-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1809-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.0\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.2\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n 
\"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools?api-version=2019-08-01.10.0", - "body": "{\"id\": \"ncj-ubuntu1604\", \"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"canonical\", \"offer\": \"ubuntuserver\", \"sku\": \"16.04-lts\"}, \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"}, \"targetDedicatedNodes\": 1}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "403987a6-c372-11e9-bcff-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "248" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey 
sdktest2:9HlWXAwOKnUZW1DN7JEbM91mCbJ+drOivOR/VXS2HYM=" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified pool already exists." - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "59a509f7-71d3-4ce8-a8ef-97e376477f4a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ], - "Content-Length": [ - "334" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#Microsoft.Azure.Batch.Protocol.Entities.Container.errors/@Element\",\"code\":\"PoolExists\",\"message\":{\r\n \"lang\":\"en-US\",\"value\":\"The specified pool already exists.\\nRequestId:59a509f7-71d3-4ce8-a8ef-97e376477f4a\\nTime:2019-08-20T17:45:08.3482716Z\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "4047df90-c372-11e9-b81c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:WHdhVUt9UpYSQv/yCLrZAlPUG7M9DiNrJQ7t8NJOo10=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - 
"max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725954EB17F5F" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:39:09 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "40bf1b98-4a01-4ef9-b672-a90255deb51a" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-ubuntu1604\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604\",\"eTag\":\"0x8D725954EB17F5F\",\"lastModified\":\"2019-08-20T17:39:09.6824671Z\",\"creationTime\":\"2019-08-20T17:39:09.6824671Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:39:09.6824671Z\",\"allocationState\":\"steady\",\"allocationStateTransitionTime\":\"2019-08-20T17:40:16.1469849Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":1,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - 
"405026d8-c372-11e9-a4e9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:T5/eohhbVrpv9MEKJm+mpWFl1pKPzicTFSkXKiJvcDc=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "d1cfbff1-dc5a-4959-9b02-95e57876a15a" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"state\":\"idle\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:40:26.38171Z\",\"lastBootTime\":\"2019-08-20T17:40:21.194161Z\",\"allocationTime\":\"2019-08-20T17:40:15.2984459Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":1,\"totalTasksSucceeded\":1,\"runningTasksCount\":0,\"recentTasks\":[\r\n {\r\n \"taskUrl\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604/tasks/myTask\",\"jobId\":\"ncj-ubuntu1604\",\"taskId\":\"myTask\",\"taskState\":\"completed\",\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:40:25.735322Z\",\"endTime\":\"2019-08-20T17:40:26.353947Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n 
],\"certificateReferences\":[\r\n \r\n ],\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n \"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.16.171\",\"publicFQDN\":\"dns07c600ce-fa0e-4d8b-8d1f-948d1d69b8c7-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":22\r\n }\r\n ]\r\n },\"nodeAgentInfo\":{\r\n \"lastUpdateTime\":\"2019-08-20T17:40:21.194161Z\",\"version\":\"1.6.4\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Batch/batchAccounts?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "4085678c-c372-11e9-ae88-44032c851686" - 
], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:08 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "content-length": [ - "2729" - ], - "x-ms-original-request-ids": [ - "65653993-abb7-44ba-9070-2e176961dbff", - "0a63772d-594c-4233-8c67-1adead346419" - ], - "Cache-Control": [ - "no-cache" - ], - "Pragma": [ - "no-cache" - ] - }, - "body": { - "string": "{\"value\":[{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/byossc\",\"name\":\"byossc\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"southcentralus\",\"properties\":{\"accountEndpoint\":\"byossc.southcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"poolAllocationMode\":\"UserSubscription\",\"keyVaultReference\":{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.KeyVault/vaults/byossc\",\"url\":\"https://byossc.vault.azure.net/\"}}},{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},
{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}]}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - 
"User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq
_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - "x-ms-client-request-id": [ - "40de58ac-c372-11e9-b51a-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:09 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Cache-Control": [ - "no-cache" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1199" - ], - "Pragma": [ - "no-cache" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "41177f90-c372-11e9-8c45-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:09 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:4h2q9L3MdHdVaNqR5kNfdbi5dOvGSuWjlgM588LGMBg=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - 
"headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259625642E21\"" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:09 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:09 GMT" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"ncj-ubuntu1604-1\", \"poolInfo\": {\"poolId\": \"ncj-ubuntu1604\"}}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lk
IjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "413b2a66-c372-11e9-84f1-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "68" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:10 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D725962617779D" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "63a51513-af28-42cc-825f-6595cf5850a3" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"myTask\", \"commandLine\": \"/bin/bash -c \\\"echo test\\\"\", \"outputFiles\": [{\"filePattern\": \"$AZ_BATCH_TASK_DIR/*.txt\", \"destination\": {\"container\": {\"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A09Z&se=2019-08-27T17%3A45%3A09Z&sp=w&sv=2017-07-29&sr=c&sig=nij98v6B3zhcV68rPGSG6wsdfjvePrD8MdMA%2BdWgM78%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}], \"constraints\": {\"retentionTime\": \"PT1H\"}}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiI
xMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "41e5bc22-c372-11e9-8009-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "471" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "b67fd7ee-0fbf-4f2e-9e21-050537cc7980" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n 
\"status\":\"Success\",\"taskId\":\"myTask\",\"eTag\":\"0x8D72596266B7123\",\"lastModified\":\"2019-08-20T17:45:11.6112163Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks/myTask\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "423ac0a6-c372-11e9-a981-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:GYi8g5Cycczhzv/MC4FhfWAk7iiQ9knRkD38XxYuul4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962617779D" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "e180a6f6-5dfe-4143-a066-0cb2956fd60b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n 
\"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"ncj-ubuntu1604-1\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1\",\"eTag\":\"0x8D725962617779D\",\"lastModified\":\"2019-08-20T17:45:11.0608797Z\",\"creationTime\":\"2019-08-20T17:45:11.0469939Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:11.0608797Z\",\"priority\":0,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":0\r\n },\"poolInfo\":{\r\n \"poolId\":\"ncj-ubuntu1604\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:45:11.0608797Z\",\"poolId\":\"ncj-ubuntu1604\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "42447acc-c372-11e9-b1e5-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:uzooMKAK+UFzRA+iUErDxX8EdMJBrU/IJrG8RrS/B5g=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:11 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "09a8cbe9-cda5-42e0-9608-b440c544f700" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - 
"X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks/myTask\",\"eTag\":\"0x8D72596266B7123\",\"creationTime\":\"2019-08-20T17:45:11.6112163Z\",\"lastModified\":\"2019-08-20T17:45:11.6112163Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:11.6112163Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A09Z&se=2019-08-27T17%3A45%3A09Z&sp=w&sv=2017-07-29&sr=c&sig=nij98v6B3zhcV68rPGSG6wsdfjvePrD8MdMA%2BdWgM78%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "4418ebd2-c372-11e9-9359-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - 
"Authorization": [ - "SharedKey sdktest2:2DE1XgHgXeHvj+bXKMOaTi/dEDolM/ddhCA9gx1QY5M=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "89dfc6fb-093a-47f1-9341-9f1ba59e7668" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks/myTask\",\"eTag\":\"0x8D72596266B7123\",\"creationTime\":\"2019-08-20T17:45:11.6112163Z\",\"lastModified\":\"2019-08-20T17:45:11.6112163Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:45:13.204694Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:45:12.459832Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A09Z&se=2019-08-27T17%3A45%3A09Z&sp=w&sv=2017-07-29&sr=c&sig=nij98v6B3zhcV68rPGSG6wsdfjvePrD8MdMA%2BdWgM78%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n 
\"startTime\":\"2019-08-20T17:45:12.459832Z\",\"endTime\":\"2019-08-20T17:45:13.204694Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"poolId\":\"ncj-ubuntu1604\",\"nodeId\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"taskRootDirectory\":\"workitems/ncj-ubuntu1604-1/job-1/myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d/files/workitems/ncj-ubuntu1604-1/job-1/myTask\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks/myTask?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "44226e40-c372-11e9-979c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:+sAC6seGiXdmK0QUFayRgjscmY7fj5DrzCfjoUD+2fk=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D72596266B7123" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 
17:45:11 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "e9c88c9c-79ea-419b-8e7d-b9f769030406" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks/@Element\",\"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1/tasks/myTask\",\"eTag\":\"0x8D72596266B7123\",\"creationTime\":\"2019-08-20T17:45:11.6112163Z\",\"lastModified\":\"2019-08-20T17:45:11.6112163Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:45:13.204694Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:45:12.459832Z\",\"commandLine\":\"/bin/bash -c \\\"echo test\\\"\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A09Z&se=2019-08-27T17%3A45%3A09Z&sp=w&sv=2017-07-29&sr=c&sig=nij98v6B3zhcV68rPGSG6wsdfjvePrD8MdMA%2BdWgM78%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:45:12.459832Z\",\"endTime\":\"2019-08-20T17:45:13.204694Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n 
\"affinityId\":\"TVM:tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"poolId\":\"ncj-ubuntu1604\",\"nodeId\":\"tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d\",\"taskRootDirectory\":\"workitems/ncj-ubuntu1604-1/job-1/myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-ubuntu1604/nodes/tvmps_453be07f8833a32514145f65c262af27329214bd3876c9903242c14905bf89d4_d/files/workitems/ncj-ubuntu1604-1/job-1/myTask\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "442e9122-c372-11e9-9417-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:x3VlCtM/x0xgsyc0czlkO0eMd0h1MQBYhaKvQ8O4HKE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ] - }, - "body": { - "string": "\ufefffileuploaderr.txtTue, 20 Aug 2019 17:45:13 GMT0x8D7259627577AB80application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:45:12 GMT0x8D72596272F03DF512application/octet-streamwyfwc69jKJUl+IdiflQalw==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:45:12 
GMT0x8D725962730B1DC0application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:45:12 GMT0x8D725962733234D5application/octet-stream2Oj8otwPiW/Xy0ywAxuiSQ==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-ubuntu1604-1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "443ad030-c372-11e9-8287-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:2d6k4MNm6y9zXvGsQ6qSCL3bI+OD+NXUH6soqYP3kNc=" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "d733f566-2eee-4150-a93a-c01e6352e8e5" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "44436798-c372-11e9-8c1b-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - 
"Authorization": [ - "SharedKey sdkteststore2:2qk1GtO2Qz8TiYWpSTP3EoEPpBNnLtjftVnaBNzCQAM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ] - }, - "body": { - "string": "\ufefffileuploaderr.txtTue, 20 Aug 2019 17:45:13 GMT0x8D7259627577AB80application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:45:12 GMT0x8D72596272F03DF512application/octet-streamwyfwc69jKJUl+IdiflQalw==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:45:12 GMT0x8D725962730B1DC0application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:45:12 GMT0x8D725962733234D5application/octet-stream2Oj8otwPiW/Xy0ywAxuiSQ==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output/fileuploaderr.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "444fca3e-c372-11e9-9a80-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:aUXLqH7Q/+C4lOihJgOYj+jGdA/aLmcMBLnEp1fyzGY=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - 
"body": { - "string": "" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output/fileuploadout.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "4457b058-c372-11e9-bc94-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:nrvYabCdvNYiXTcv5dE10UEpJa5FNQVY4HMIxMKmvMw=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output/stderr.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "445f51a8-c372-11e9-ac85-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:76B7T3kYtjypQAeUP7jLcLeLVmZwqIpFZDoQ9Tn7zKY=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": 
"DELETE", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output/stdout.txt", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "4466ca7e-c372-11e9-89e4-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:BZb/FO9L0YjFH0KUNy9bfPKh71O+zrKCvun63Ix6Dak=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-delete-type-permanent": [ - "true" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/supportedimages?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "446ee9e2-c372-11e9-8150-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:HeMf6GhqUC9nZHLofWCNXvDDzAXG2xLA4amGN8KJ+5M=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "68035f8f-41ca-496b-adef-2211f3f2fa2d" 
- ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#supportedimages\",\"value\":[\r\n {\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-centos73\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"batch\",\"offer\":\"rendering-windows2016\",\"sku\":\"rendering\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"16.04.0-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"canonical\",\"offer\":\"ubuntuserver\",\"sku\":\"18.04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 18.04\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"8-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 
8\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"credativ\",\"offer\":\"debian\",\"sku\":\"9-backports\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.debian 9\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"linux-data-science-vm\",\"sku\":\"linuxdsvm\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-ads\",\"offer\":\"standard-data-science-vm\",\"sku\":\"standard-data-science-vm\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container\",\"sku\":\"7-6\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"centos-container-rdma\",\"sku\":\"7-4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"NvidiaGridDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-azure-batch\",\"offer\":\"ubuntu-server-container-rdma\",\"sku\":\"16-04-lts\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\",\"SupportsRDMAOnly\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"azureml\",\"sku\":\"runtime\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoft-dsvm\",\"offer\":\"dsvm-windows\",\"sku\":\"server-2016\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoft-dsvm\",\"offer\":\"linux-data-science-vm-ubuntu\",\"sku\":\"linuxdsvmubuntu\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\",\"capabilities\":[\r\n \"DockerCompatible\",\"NvidiaTeslaDriverInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2008-r2-sp1-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n 
\"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2016-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n 
],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-core-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2019-datacenter-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1709-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n 
\"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1803-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserversemiannual\",\"sku\":\"datacenter-core-1809-with-containers-smalldisk\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.windows amd64\",\"capabilities\":[\r\n \"DockerCompatible\"\r\n ],\"osType\":\"windows\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.0\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.2\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.1\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"openlogic\",\"offer\":\"centos-hpc\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"capabilities\":[\r\n \"SupportsRDMAOnly\",\"IntelMPIRuntimeInstalled\"\r\n ],\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.3\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.4\",\"version\":\"latest\"\r\n 
},\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.5\",\"version\":\"latest\"\r\n },\"verificationType\":\"unverified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n },{\r\n \"imageReference\":{\r\n \"publisher\":\"oracle\",\"offer\":\"oracle-linux\",\"sku\":\"7.6\",\"version\":\"latest\"\r\n },\"verificationType\":\"verified\",\"nodeAgentSKUId\":\"batch.node.centos 7\",\"osType\":\"linux\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools?api-version=2019-08-01.10.0", - "body": "{\"id\": \"ncj-windows-2012-r2\", \"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"microsoftwindowsserver\", \"offer\": \"windowsserver\", \"sku\": \"2012-r2-datacenter\"}, \"nodeAgentSKUId\": \"batch.node.windows amd64\"}, \"targetDedicatedNodes\": 1}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "447ad4cc-c372-11e9-91ea-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "277" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:MYPpSapN7cxQpDcyyhM+QitUie8hQ8biclNO7XpQCiM=" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified pool already exists." 
- }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "9a5644b3-e6be-4308-a4f7-5c7fd231165c" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ], - "Content-Length": [ - "334" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#Microsoft.Azure.Batch.Protocol.Entities.Container.errors/@Element\",\"code\":\"PoolExists\",\"message\":{\r\n \"lang\":\"en-US\",\"value\":\"The specified pool already exists.\\nRequestId:9a5644b3-e6be-4308-a4f7-5c7fd231165c\\nTime:2019-08-20T17:45:15.4699564Z\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "44866942-c372-11e9-ac75-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:NIaG0PnfWRfpY1MNixscIgIiFKBcr1vfLNzIObX9sHg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725957DFB0158" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:40:29 GMT" - ], - "Server": [ - 
"Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "a1b930b6-ff15-419f-8df5-e6f7c836ace6" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"ncj-windows-2012-r2\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2\",\"eTag\":\"0x8D725957DFB0158\",\"lastModified\":\"2019-08-20T17:40:29.0171224Z\",\"creationTime\":\"2019-08-20T17:40:29.0171224Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:40:29.0171224Z\",\"allocationState\":\"steady\",\"allocationStateTransitionTime\":\"2019-08-20T17:41:45.3536086Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":1,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"microsoftwindowsserver\",\"offer\":\"windowsserver\",\"sku\":\"2012-r2-datacenter\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.windows amd64\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "448dee90-c372-11e9-a2e6-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:15 
GMT" - ], - "Authorization": [ - "SharedKey sdktest2:BYIeN7EVjONerdTZbysxRso2w8Y/5SodQSrJYInSw7k=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:14 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "37dd8839-b7db-4ab8-b71d-05870676743e" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#nodes\",\"value\":[\r\n {\r\n \"id\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"state\":\"idle\",\"schedulingState\":\"enabled\",\"stateTransitionTime\":\"2019-08-20T17:44:53.998613Z\",\"lastBootTime\":\"2019-08-20T17:44:47.433702Z\",\"allocationTime\":\"2019-08-20T17:41:44.5757579Z\",\"ipAddress\":\"10.0.0.4\",\"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"vmSize\":\"standard_d1_v2\",\"totalTasksRun\":1,\"totalTasksSucceeded\":1,\"runningTasksCount\":0,\"recentTasks\":[\r\n {\r\n \"taskUrl\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2/tasks/myTask\",\"jobId\":\"ncj-windows-2012-r2\",\"taskId\":\"myTask\",\"taskState\":\"completed\",\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:44:53.042985Z\",\"endTime\":\"2019-08-20T17:44:53.967174Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n ],\"certificateReferences\":[\r\n \r\n ],\"isDedicated\":true,\"endpointConfiguration\":{\r\n \"inboundEndpoints\":[\r\n {\r\n 
\"name\":\"SSHRule.0\",\"protocol\":\"tcp\",\"publicIPAddress\":\"52.159.21.30\",\"publicFQDN\":\"dns1e3cf4d6-e900-4b6b-83dd-f0405cfffd73-azurebatch-cloudservice.westcentralus.cloudapp.azure.com\",\"frontendPort\":50000,\"backendPort\":3389\r\n }\r\n ]\r\n },\"nodeAgentInfo\":{\r\n \"lastUpdateTime\":\"2019-08-20T17:44:47.433702Z\",\"version\":\"1.6.4\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Batch/batchAccounts?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQ
iOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "44ba866e-c372-11e9-a07b-44032c851686" - ], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "content-length": [ - "2729" - ], - "x-ms-original-request-ids": [ - "1a4bba4d-043b-4e32-8cc3-4a641aa58b81", - "21497a40-928d-4fbe-be78-cd0823fb21b3" - ], - "Cache-Control": [ - "no-cache" - ], - "Pragma": [ - "no-cache" - ] - }, - "body": { - "string": 
"{\"value\":[{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/byossc\",\"name\":\"byossc\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"southcentralus\",\"properties\":{\"accountEndpoint\":\"byossc.southcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"poolAllocationMode\":\"UserSubscription\",\"keyVaultReference\":{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.KeyVault/vaults/byossc\",\"url\":\"https://byossc.vault.azure.net/\"}}},{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name
\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}]}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - 
"x-ms-client-request-id": [ - "44fc2042-c372-11e9-8aee-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Cache-Control": [ - "no-cache" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1199" - ], - "Pragma": [ - "no-cache" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "4524af9c-c372-11e9-81f1-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:sV/dojC3vEOKaPoV7vK/EIjr+KTkokgD49Wgqa//HnE=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:15 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:fb9e174e-c01e-003e-6b7f-57d221000000\nTime:2019-08-20T17:45:16.6939433Z" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"ncj-windows-2012-r2-1\", \"poolInfo\": {\"poolId\": \"ncj-windows-2012-r2\"}}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "45414f66-c372-11e9-8570-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "78" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D72596299AC0BF" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "53478f48-65be-4ca7-b7b8-999655f48af6" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"myTask\", \"commandLine\": \"cmd /c echo | set /p dummy=test\", \"outputFiles\": [{\"filePattern\": \"$AZ_BATCH_TASK_DIR/*.txt\", \"destination\": {\"container\": {\"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A16Z&se=2019-08-27T17%3A45%3A16Z&sp=w&sv=2017-07-29&sr=c&sig=bVYOPdfrMPuJKZS0wQROeVCiAnU8Kxrjj1SRBO3gp5M%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}], \"constraints\": {\"retentionTime\": \"PT1H\"}}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - 
"Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; 
charset=utf-8" - ], - "client-request-id": [ - "456929be-c372-11e9-8c04-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "474" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "c4bc77d9-2c4d-42dc-9843-7e9fbd0a0ec7" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n \"status\":\"Success\",\"taskId\":\"myTask\",\"eTag\":\"0x8D7259629DC8103\",\"lastModified\":\"2019-08-20T17:45:17.3853443Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks/myTask\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "45abb300-c372-11e9-8af5-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:o6KUMWoXLdqzIk5JfpZBeP7S1GSYDuQ5zAFh6LAfdww=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - 
"Date": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D72596299AC0BF" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:16 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "350079b9-2118-4fb7-8edb-8328bfdc1c94" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"ncj-windows-2012-r2-1\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1\",\"eTag\":\"0x8D72596299AC0BF\",\"lastModified\":\"2019-08-20T17:45:16.9544383Z\",\"creationTime\":\"2019-08-20T17:45:16.9394467Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:16.9544383Z\",\"priority\":0,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":0\r\n },\"poolInfo\":{\r\n \"poolId\":\"ncj-windows-2012-r2\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:45:16.9544383Z\",\"poolId\":\"ncj-windows-2012-r2\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "45b60ec6-c372-11e9-b906-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Authorization": [ - 
"SharedKey sdktest2:TnyDHUqfGG++OCjU2ToThb19pHa957TE8Sy/CfytQcM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "73c27ea7-edab-4c74-b383-b8698b8abded" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks/myTask\",\"eTag\":\"0x8D7259629DC8103\",\"creationTime\":\"2019-08-20T17:45:17.3853443Z\",\"lastModified\":\"2019-08-20T17:45:17.3853443Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:17.3853443Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A16Z&se=2019-08-27T17%3A45%3A16Z&sp=w&sv=2017-07-29&sr=c&sig=bVYOPdfrMPuJKZS0wQROeVCiAnU8Kxrjj1SRBO3gp5M%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"retryCount\":0,\"requeueCount\":0\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "478a8b4c-c372-11e9-b188-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:/le2GBHN9MKRVtoNO4PoGUJ3gKZCRijaeK/FLxWPmQU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "8bfaaf9e-dd8d-4eef-8764-1d76e36188ce" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks\",\"value\":[\r\n {\r\n \"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks/myTask\",\"eTag\":\"0x8D7259629DC8103\",\"creationTime\":\"2019-08-20T17:45:17.3853443Z\",\"lastModified\":\"2019-08-20T17:45:17.3853443Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:45:18.734037Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:45:18.062269Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n 
\"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A16Z&se=2019-08-27T17%3A45%3A16Z&sp=w&sv=2017-07-29&sr=c&sig=bVYOPdfrMPuJKZS0wQROeVCiAnU8Kxrjj1SRBO3gp5M%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:45:18.062269Z\",\"endTime\":\"2019-08-20T17:45:18.734037Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"poolId\":\"ncj-windows-2012-r2\",\"nodeId\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"taskRootDirectory\":\"workitems\\\\ncj-windows-2012-r2-1\\\\job-1\\\\myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d/files/workitems/ncj-windows-2012-r2-1/job-1/myTask\"\r\n }\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks/myTask?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - 
"47946966-c372-11e9-9c40-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:Pqp1LVID6HMpKmMiKIJJ4WcYcReN7QJpOfm8L+272i4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D7259629DC8103" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:17 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "10efabb0-f391-4955-9578-26d695dbecb6" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#tasks/@Element\",\"id\":\"myTask\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1/tasks/myTask\",\"eTag\":\"0x8D7259629DC8103\",\"creationTime\":\"2019-08-20T17:45:17.3853443Z\",\"lastModified\":\"2019-08-20T17:45:17.3853443Z\",\"state\":\"completed\",\"stateTransitionTime\":\"2019-08-20T17:45:18.734037Z\",\"previousState\":\"running\",\"previousStateTransitionTime\":\"2019-08-20T17:45:18.062269Z\",\"commandLine\":\"cmd /c echo | set /p dummy=test\",\"outputFiles\":[\r\n {\r\n \"filePattern\":\"$AZ_BATCH_TASK_DIR/*.txt\",\"destination\":{\r\n \"container\":{\r\n \"containerUrl\":\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A30%3A16Z&se=2019-08-27T17%3A45%3A16Z&sp=w&sv=2017-07-29&sr=c&sig=bVYOPdfrMPuJKZS0wQROeVCiAnU8Kxrjj1SRBO3gp5M%3D\"\r\n }\r\n },\"uploadOptions\":{\r\n \"uploadCondition\":\"TaskSuccess\"\r\n }\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"nonadmin\"\r\n }\r\n },\"constraints\":{\r\n 
\"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"retentionTime\":\"PT1H\",\"maxTaskRetryCount\":0\r\n },\"executionInfo\":{\r\n \"startTime\":\"2019-08-20T17:45:18.062269Z\",\"endTime\":\"2019-08-20T17:45:18.734037Z\",\"exitCode\":0,\"result\":\"success\",\"retryCount\":0,\"requeueCount\":0\r\n },\"nodeInfo\":{\r\n \"affinityId\":\"TVM:tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"nodeUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"poolId\":\"ncj-windows-2012-r2\",\"nodeId\":\"tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d\",\"taskRootDirectory\":\"workitems\\\\ncj-windows-2012-r2-1\\\\job-1\\\\myTask\",\"taskRootDirectoryUrl\":\"https://sdktest2.westcentralus.batch.azure.com/pools/ncj-windows-2012-r2/nodes/tvmps_ab68cd4d729ca9bc23e5ffff7aa674c279f32a8304c6be1593d4e03e218f6651_d/files/workitems/ncj-windows-2012-r2-1/job-1/myTask\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "479e95ca-c372-11e9-b8ca-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:V/UZfT6VOri2g7A27qdzLqllxvir/pooZRSX4aUionA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:19 GMT" - ] - }, - "body": { - "string": 
"\ufefffileuploaderr.txtTue, 20 Aug 2019 17:45:18 GMT0x8D725962A9D9FBA0application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruefileuploadout.txtTue, 20 Aug 2019 17:45:18 GMT0x8D725962A9F4DB3525application/octet-streamqkl+koQBVfcwsY+RqYYRgQ==BlockBlobHottrueunlockedavailabletruestderr.txtTue, 20 Aug 2019 17:45:18 GMT0x8D725962AA170FA0application/octet-stream1B2M2Y8AsgTpgAmY7PhCfg==BlockBlobHottrueunlockedavailabletruestdout.txtTue, 20 Aug 2019 17:45:18 GMT0x8D725962AA3460A4application/octet-streamCY9rzUYh03PK3k6DJie09g==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/ncj-windows-2012-r2-1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "47aa580a-c372-11e9-8d64-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:3e/clmvwyBmJVTrI90SmYPcE3XHtVAkT455y792HKus=" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "5039fb54-1cb0-4b76-a0cf-e16fddd14f9f" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": 
"https://sdktest2.westcentralus.batch.azure.com/pools?api-version=2019-08-01.10.0", - "body": "{\"id\": \"blobsource1\", \"displayName\": \"Blender Ubuntu standard pool\", \"vmSize\": \"Standard_D1_v2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"Canonical\", \"offer\": \"UbuntuServer\", \"sku\": \"16.04.0-LTS\", \"version\": \"latest\"}, \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"}, \"targetDedicatedNodes\": 1, \"targetLowPriorityNodes\": 0, \"enableAutoScale\": false, \"startTask\": {\"commandLine\": \"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\", \"resourceFiles\": [{\"httpUrl\": \"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\", \"filePath\": \"setup-linux-pool.sh\"}], \"userIdentity\": {\"autoUser\": {\"scope\": \"pool\", \"elevationLevel\": \"admin\"}}, \"maxTaskRetryCount\": 0, \"waitForSuccess\": true}}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - ], - "client-request-id": [ - "47b4d5ca-c372-11e9-bb31-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "757" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:Nbd1SyrR1LL0C+r9LMZPVSmo8eoQwVCwsn3iyW8YQc4=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - 
"0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "f3ca63b2-6f68-426c-ab9f-99923cff795e" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "47eca2ac-c372-11e9-810b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:VVmV7Y4ckii4aKVHUX7L0hCHIBxrv9AcaDpeQSzNLHg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:20 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "839d3d5d-5814-437e-b5a7-bdcf430d0129" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "49c01926-c372-11e9-8444-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:24 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:CHsDCW2JEyxhM/tzIFueAWbXXM2W8Tz0cxGkjKkQJtE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:23 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "1b8c07e6-6074-4850-a237-467cad41d7ec" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "4b93d6ae-c372-11e9-a3e3-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:27 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:atJV14scQOZoo2OEbJoN8LvAY0WExJZCzIDM9aWOMrg=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "3dd66382-3cce-4f21-9c57-0920531636e8" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "4d68c5c6-c372-11e9-a3c4-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:30 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:n2gFWdi/b6F7vO/JHpr9ICD4KRRfCm+4PIeGIcMg6zA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:29 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "c8624a04-9833-461c-9a5b-648d548e44cd" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "4f3d608a-c372-11e9-a1ee-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:33 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:O/8+sj6pomr4ISqqO9N7IS7+uZO7md/OJJ8XRzesnjE=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:33 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "be444d3a-4774-45f5-b5bb-6ae858d64424" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "51114b68-c372-11e9-b836-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:36 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:X80axkerQm+C3mFT6UQKtWRS2rkWSFkAc0acT2lfBOA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:35 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "93702ffe-8e8d-4b88-b630-c34ddf932cf0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "52e48be4-c372-11e9-aeed-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:39 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:bKFtiYVRU3Z1dOYdXfu7n49G5TZ+1BRrF74wKo8aINs=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:39 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "1425543d-fb03-4307-bc1f-81f19ed45112" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "54b7d9a6-c372-11e9-91b0-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:42 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ZcdNTD1N2TWBwwGe+YMNxIXIXrcFyAPtsvogQuax7FA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:42 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "d1ca0d37-115e-48be-b809-ab6988e837bd" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "568dc242-c372-11e9-b2b7-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:45 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:bbeAKBlsA+34ho857UnsgL8KsEB2rqLMo2ZobsFjuwU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:45 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "75e3361f-cedf-4cd0-a5c5-f53d9a909abe" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "5860a7f6-c372-11e9-99bb-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:48 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:sccm3YU5dTg28SNdrE1oCaqefGO5k6tfpQB16Rn9wjI=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:48 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "7ee81982-65ea-4a7c-9f48-fda518631571" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "5a37574a-c372-11e9-92a7-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:51 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:X2GO1nWxAzV8ql1PeBCWJ2fe6rKsznlmqwCyKG64S6E=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:51 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "53a42575-d0c2-46e8-b276-4e93fa0fc219" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "5c0fa686-c372-11e9-b91c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:45:54 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:/Mw2aVV9YAA/uA5EhWuXVNjPglx16rTYtJyA9vq22U8=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:54 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "82b2078d-6a5e-4ad2-b801-0a1489a826c3" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "5de3b250-c372-11e9-ad92-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:45:58 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:8WQ9mYqxkoX138Galbp0Yx5sEQyW1a7Hl6Db3qtbYIM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:45:57 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "143807ae-2c22-493b-9ce2-31c74719b5ef" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "5fba9c5c-c372-11e9-b9f2-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:01 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:7X4QuUKm51a+kUygJBcMXDUx2QATnlrUm1l5pxP/jRA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:01 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "b302e78d-6439-49d5-bc85-dbf167d70c69" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "61923d1c-c372-11e9-b01b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:04 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:gTbr+5VkwhTNNIXSF5wu+h5ifou19fOwh2HQMoefFFM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:03 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "bb55b372-7e59-4b7c-8948-a70782433662" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6367a9de-c372-11e9-9ec3-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:07 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ALOSRO+vz38KYlRxlsPy1mayK+lm6UuIyF5GK/wtYJM=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:06 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "032f5991-361f-4e50-a562-cf5df41ec619" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "653cf14a-c372-11e9-9b94-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:10 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:/f+geWjGZyK1Rc0sTRs7nggTKkX6DzLqxe5CyI5+p4w=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:10 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "e5b37bc7-8c7e-445f-b036-1b62064b65f1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "670f98e2-c372-11e9-bd73-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:13 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:wtWUHV/+pQ9x5nl1JO3thwwBk+XGq/LzU5hC4Zd6zDw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:13 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "99c76b74-d005-46e9-afd7-3f88f908c68b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "68e313c0-c372-11e9-a881-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:16 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:JcdqGi3P2XAXRS0JDayWC7yp9RWCh8qUzBR4vqnNGCA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:16 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "5cf54389-9bf7-4581-ad45-34b21384bea3" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6ab6f8b4-c372-11e9-a0c8-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:19 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:nkRkRvY4DTVViGGqxYg6Y4GbIEe+DkZBgsbJh/E4O5o=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:19 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "0a94af53-d0ec-4c04-b3ce-97b665235eea" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6c904654-c372-11e9-9d4d-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:22 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:J79K//NC0/jRnQiRaLu9N0T8LgI1mEeXcMM9BC6ckjQ=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:21 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "af2f280d-d411-493c-afa3-b6d9da16aa1b" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "6e665b68-c372-11e9-9e79-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:25 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:8uzNcOB5A5/IGgKQ8v/z1cGU72tLlFTKBq4/fdmNz0w=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:25 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "dfd8a08e-5ee0-4240-9ff7-d0bf10e5a6dc" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "703b9292-c372-11e9-80b8-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:28 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:5i4EP2WPEtovbPGofxzhdjSwDJhYxuj7YhiWFxIKUfU=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:27 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "cc6b86c8-82b8-418c-a143-8ba566e7143e" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "720f5d1a-c372-11e9-8120-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:31 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:LqZBE3t/qp4E7FGtl2eNlp5Pg6MNHzRsb67VaXjzlCY=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:31 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "db9735d7-0546-4e9e-a51a-04a5cca3631e" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "73e48082-c372-11e9-a9f5-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:34 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:pIUDSeQn7ujWmscYWU4ubB+KmPTOD5k207+ftO+4g4U=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:34 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "daaed897-1f2b-48db-aebd-058e6cf0b2af" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "75b809b4-c372-11e9-9fb2-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:38 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:ThwCIFWg59ctGHQL1uTzjQu2AKVNe6YjipdPgcA8Znw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:37 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "87a259dd-2f97-4cb9-875b-dc52bccf2e3a" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "778bd4da-c372-11e9-96bd-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:41 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:9jO83v6JYKMU7pIffeJK6xikqkmMoxIn62m7fCFtfWA=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:40 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "1d5059e6-c785-4d14-b57e-198a2a0a7b42" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "79601f54-c372-11e9-bde6-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:44 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:YBl5KHi3iYOH0EKy8Qouk6pwZc5BGrlVHy3LOiVxPC4=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:43 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "90d3ad71-1823-4618-9c3e-0d5ff5437ef2" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7b337940-c372-11e9-8689-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:47 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:RY+p42LY4oCMYdO7k9XBA6XxQjU3MCX5jMarVHne0+Y=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:46 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "44a2ae57-53ae-4f47-abcb-ef8c8a2b2e83" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7d08ad58-c372-11e9-bd0b-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": 
[ - "Tue, 20 Aug 2019 17:46:50 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:0zILMGBISNM413Rklm0U33wDfGZDG4CxU3GDGzWa6Cw=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:50 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "95cea4be-4652-402a-b39e-9ee5b0296995" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"resizing\",\"allocationStateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":0,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n 
},\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7edd9662-c372-11e9-a7af-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:k12KFjb7CrhpUlqBnzr8lRGqI2/CcDTj+xrMYqIAuYo=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D725962C1A843B" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:45:21 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "request-id": [ - "67a717ba-17ad-493e-9aab-257290ccda4f" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#pools/@Element\",\"id\":\"blobsource1\",\"displayName\":\"Blender Ubuntu standard 
pool\",\"url\":\"https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1\",\"eTag\":\"0x8D725962C1A843B\",\"lastModified\":\"2019-08-20T17:45:21.1471931Z\",\"creationTime\":\"2019-08-20T17:45:21.1471931Z\",\"state\":\"active\",\"stateTransitionTime\":\"2019-08-20T17:45:21.1471931Z\",\"allocationState\":\"steady\",\"allocationStateTransitionTime\":\"2019-08-20T17:46:52.2025916Z\",\"vmSize\":\"standard_d1_v2\",\"resizeTimeout\":\"PT15M\",\"currentDedicatedNodes\":1,\"targetDedicatedNodes\":1,\"currentLowPriorityNodes\":0,\"targetLowPriorityNodes\":0,\"enableAutoScale\":false,\"enableInterNodeCommunication\":false,\"startTask\":{\r\n \"commandLine\":\"/bin/bash -c 'set -e; set -o pipefail; sleep 1; wait'\",\"resourceFiles\":[\r\n {\r\n \"httpUrl\":\"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\"filePath\":\"setup-linux-pool.sh\"\r\n }\r\n ],\"userIdentity\":{\r\n \"autoUser\":{\r\n \"scope\":\"pool\",\"elevationLevel\":\"admin\"\r\n }\r\n },\"maxTaskRetryCount\":0,\"waitForSuccess\":true\r\n },\"maxTasksPerNode\":1,\"taskSchedulingPolicy\":{\r\n \"nodeFillType\":\"Spread\"\r\n },\"virtualMachineConfiguration\":{\r\n \"imageReference\":{\r\n \"publisher\":\"Canonical\",\"offer\":\"UbuntuServer\",\"sku\":\"16.04.0-LTS\",\"version\":\"latest\"\r\n },\"nodeAgentSKUId\":\"batch.node.ubuntu 16.04\"\r\n }\r\n}" - } - } - }, - { - "request": { - "method": "DELETE", - "uri": "https://sdktest2.westcentralus.batch.azure.com/pools/blobsource1?api-version=2019-08-01.10.0", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "client-request-id": [ - "7ee6ae28-c372-11e9-85f8-44032c851686" - ], - "accept-language": [ - "en-US" - ], - 
"Content-Length": [ - "0" - ], - "ocp-date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Authorization": [ - "SharedKey sdktest2:sw0Ru0r1hk+uWbtvVWSy/y2EeBc+clW/u8ytk8rUaDY=" - ] - } - }, - "response": { - "status": { - "code": 202, - "message": "Accepted" - }, - "headers": { - "Date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "747d94a8-712d-4af2-814e-50b396ee5ef0" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"job123\", \"poolInfo\": {\"autoPoolSpecification\": {\"autoPoolIdPrefix\": \"pool123\", \"poolLifetimeOption\": \"job\", \"keepAlive\": false, \"pool\": {\"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"Canonical\", \"offer\": \"UbuntuServer\", \"sku\": \"16.04.0-LTS\", \"version\": \"latest\"}, \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"}, \"targetDedicatedNodes\": 1}}}, \"onAllTasksComplete\": \"noaction\"}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "7f1a2d76-c372-11e9-aec1-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "428" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D7259663BD0031" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:54 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "14cb0d4c-8d64-4cd1-b558-c4f955a61f55" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/job123/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"1\", \"commandLine\": \"/bin/bash -c 'cat {fileName}'\", \"resourceFiles\": [{\"httpUrl\": \"https://testacct.blob.core.windows.net/\", \"filePath\": \"location\"}]}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "7f8b457e-c372-11e9-8010-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "171" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:53 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "ba8403b7-edc4-4ade-a88f-ee1ad083527d" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n \"status\":\"Success\",\"taskId\":\"1\",\"eTag\":\"0x8D7259663DE444D\",\"lastModified\":\"2019-08-20T17:46:54.7047501Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/job123/tasks/1\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "PATCH", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/job123?api-version=2019-08-01.10.0", - "body": "{\"onAllTasksComplete\": \"terminatejob\"}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "7fadc10c-c372-11e9-abc9-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "38" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job123" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:54 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D7259663EC87A8" - ], - "request-id": [ - "fd61eb7a-8863-441f-aa5d-a5578f7f261e" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:54 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Batch/batchAccounts?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "7fe35b92-c372-11e9-8a63-44032c851686" - 
], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "content-length": [ - "2729" - ], - "x-ms-original-request-ids": [ - "f6451908-52f2-4fe9-937d-76d9012e29af", - "779308ae-5bb3-442c-bccd-cb92c54137f0" - ], - "Cache-Control": [ - "no-cache" - ], - "Pragma": [ - "no-cache" - ] - }, - "body": { - "string": "{\"value\":[{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/byossc\",\"name\":\"byossc\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"southcentralus\",\"properties\":{\"accountEndpoint\":\"byossc.southcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"poolAllocationMode\":\"UserSubscription\",\"keyVaultReference\":{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.KeyVault/vaults/byossc\",\"url\":\"https://byossc.vault.azure.net/\"}}},{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},
{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}]}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - 
"User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq
_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - "x-ms-client-request-id": [ - "8028bfa2-c372-11e9-9035-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Cache-Control": [ - "no-cache" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1199" - ], - "Pragma": [ - "no-cache" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80608112-c372-11e9-a4bf-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:kSj4yabG3dDvxSHTtdVMC6Irdl2tYoImswKXUww/ctE=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - 
"headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259664B9CF06\"" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-parameters.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "808bec6e-c372-11e9-9ffb-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:J5/0NQDRDfcE8qLM2AzV3zQM1tEWXv1RsGMWxI+dcRo=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de5d7-401e-00ac-117f-574597000000\nTime:2019-08-20T17:46:56.2265985Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-parameters.json", - "body": "{\r\n \"templateMetadata\": {\r\n \"description\": \"A test application template that makes use of multiple parameters after properly declaring them.\"\r\n },\r\n \"jobManagerTask\": {\r\n \"id\":\"mytask1\",\r\n \"commandLine\":\"myprogram.exe\",\r\n \"resourceFiles\": [ {\r\n \"httpUrl\":\"http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d\",\r\n \"filePath\":\"myprogram.exe\"\r\n },\r\n {\r\n \"httpUrl\":\"http://mystorage1.blob.core.windows.net/scripts/test.txt?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d\",\r\n \"filePath\":\"[parameters('blobName')]\"\r\n } ],\r\n \"environmentSettings\": [ {\r\n \"name\":\"myvariable\",\r\n \"value\":\"myvalue\"\r\n } ],\r\n \"constraints\": {\r\n \"maxWallClockTime\":\"PT1H\",\r\n \"maxTaskRetryCount\":0,\r\n \"retentionTime\":\"PT1H\"\r\n },\r\n \"killJobOnCompletion\":false,\r\n \"runElevated\":false,\r\n \"runExclusive\":true\r\n },\r\n \"metadata\": [ {\r\n \"name\":\"myproperty\",\r\n \"value\":\"[parameters('keyValue')]\"\r\n } ],\r\n \"parameters\": {\r\n \"blobName\" : {\r\n \"type\": \"string\"\r\n },\r\n 
\"keyValue\" : {\r\n \"type\": \"string\"\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.0615716" - ], - "Content-MD5": [ - "GhvIqLxdhgZSxhi6CdQjLQ==" - ], - "Content-Length": [ - "1377" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8095893a-c372-11e9-927e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:PUCU2uHpEkVCGmlNyUla4nKcIJPo9Om86OQDwhZb+80=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259664CFA65D\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Content-MD5": [ - "GhvIqLxdhgZSxhi6CdQjLQ==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80a2fb68-c372-11e9-bb67-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:CyBt37svbXQVkDm8TcIxt1XrynCFsfjwUyns4Py+IkA=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de5ef-401e-00ac-277f-574597000000\nTime:2019-08-20T17:46:56.3717012Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedApplicationTemplateInfo.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80aae9a2-c372-11e9-8322-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:Zf0ra2bs/ZMFW3S78TLZ/z/EX6VhGnfPZfKDFZUbLXc=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:55 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de602-401e-00ac-377f-574597000000\nTime:2019-08-20T17:46:56.4617644Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedApplicationTemplateInfo.json", - "body": "{\r\n \"templateInfo\": {\r\n \"description\": \"A test application template that specifies the prohibited property 'applicationTemplate'.\"\r\n },\r\n \"applicationTemplateInfo\": {\r\n \"filePath\" : \"sample\\\\path\"\r\n }\r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.252081" - ], - "Content-MD5": [ - "xbCZcjm1pOMcwR8Td2yo9w==" - ], - "Content-Length": [ - "219" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80b9449c-c372-11e9-b98b-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:SqcoXkmohS99KXgZk8AwfYua3LtnjI/OuHVPYfawxds=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259664F3133A\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "xbCZcjm1pOMcwR8Td2yo9w==" - ], - "Content-Length": [ - "0" - ] 
- }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80c1da5e-c372-11e9-809d-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:QrJ+WqbEoIEXu7zSgKaKmPGHnAJ7KRCdy7hIq3TD9ks=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de61a-401e-00ac-4e7f-574597000000\nTime:2019-08-20T17:46:56.5748448Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedId.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80c9d28c-c372-11e9-b2d0-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:f5u7CiEeMWCzw0bHzb9rlYbRNLL98Fy7DWb21qgaTcg=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de624-401e-00ac-587f-574597000000\nTime:2019-08-20T17:46:56.6308841Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedId.json", - "body": "{\r\n \"templateInfo\": {\r\n \"description\": \"A test application template that specifies prohibited property 'id'.\"\r\n },\r\n \"id\" : \"jobid\"\r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.252081" - ], - "Content-MD5": [ - "HBCHz/rBYi8V9ILMKx0o+g==" - ], - "Content-Length": [ - "146" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80d32158-c372-11e9-855e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:hR0xApSOB72e5om0cCsftCq6t6TQCBPBlml5hEdyQYI=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596650DA4E3\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "HBCHz/rBYi8V9ILMKx0o+g==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": 
"https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80dc7146-c372-11e9-8009-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:sUojwrH72/0EicyVIqMB9p0yBqvTbPzFkvTFhIPIXOI=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de649-401e-00ac-7a7f-574597000000\nTime:2019-08-20T17:46:56.7499687Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPoolInfo.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80e4d51e-c372-11e9-b260-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:DHEDR4AV6oYRQVKtAFTDuWRsnVHWFPQH4jIKjQ/IqOY=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de654-401e-00ac-047f-574597000000\nTime:2019-08-20T17:46:56.8080098Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPoolInfo.json", - "body": "{\r\n \"templateInfo\": {\r\n \"description\": \"A test application template that specifies prohibited property 'poolInfo'.\"\r\n },\r\n \"poolInfo\": {\r\n \"poolId\" : \"swimming\"\r\n } \r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.253081" - ], - "Content-MD5": [ - "PFiBkLMhFseOyDvKgJXaRA==" - ], - "Content-Length": [ - "187" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80ee084c-c372-11e9-9278-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:WOx8e66c/we8WePC6GI/j+a4Yefjb98uLHaLmmRE8ms=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596652884BA\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "PFiBkLMhFseOyDvKgJXaRA==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - 
"request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "80fbae86-c372-11e9-a753-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:n3w3i86k0P2B1nLdVvuIWDom3JePIeMsg/fEleo3Xr4=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de680-401e-00ac-2b7f-574597000000\nTime:2019-08-20T17:46:56.9531128Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPriority.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8103a108-c372-11e9-bf44-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:EoWcxGtkN+HSIuUK+4q6+XjPJVkVfGePJQ7iqmCyRX4=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de68f-401e-00ac-397f-574597000000\nTime:2019-08-20T17:46:57.0111534Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPriority.json", - "body": "{\r\n \"templateInfo\": {\r\n \"description\": \"A test application template that specifies the prohibited property 'priority'.\"\r\n },\r\n \"displayName\": \"Static Application Template\",\r\n \"priority\": 100\r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.253081" - ], - "Content-MD5": [ - "IvRrVHIc/lLy/wSkE22LeA==" - ], - "Content-Length": [ - "206" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "810d2dac-c372-11e9-b614-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:P0CLak5Ohx4OfKBc00v8xHdAezC3K44OSG7v4FYsrA4=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596654783FB\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "IvRrVHIc/lLy/wSkE22LeA==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": 
"" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81166c0c-c372-11e9-8fac-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:uv+enZU+r9LvCpPwwR/D5B6Zg4VoYBEw7JZo9cUp/qE=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de6cf-401e-00ac-767f-574597000000\nTime:2019-08-20T17:46:57.1282358Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-static.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "811e4fc6-c372-11e9-8821-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:y6GoVnW8MR3LwBf7FK65lFZ2rUxZGqtD6/ZAtroigo4=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de6de-401e-00ac-057f-574597000000\nTime:2019-08-20T17:46:57.1862773Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-static.json", - "body": "{\r\n \"templateMetadata\": {\r\n \"description\": \"A test application template that has no parameters and has exactly the same result every time.\"\r\n },\r\n \"jobManagerTask\": {\r\n \"id\": \"jobManager\",\r\n \"displayName\": \"jobManagerDisplay\",\r\n \"commandLine\": \"cmd /c dir /s\",\r\n \"resourceFiles\": [\r\n {\r\n \"httpUrl\": \"https://testacct.blob.core.windows.net/\",\r\n \"filePath\": \"filePath\"\r\n }\r\n ],\r\n \"environmentSettings\": [\r\n {\r\n \"name\": \"name1\",\r\n \"value\": \"value1\"\r\n },\r\n {\r\n \"name\": \"name2\",\r\n \"value\": \"value2\"\r\n }\r\n ],\r\n \"constraints\": {\r\n \"maxWallClockTime\": \"PT1H\"\r\n },\r\n \"killJobOnCompletion\": false,\r\n \"runElevated\": false\r\n }\r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.0625725" - ], - "Content-MD5": [ - "27HmU8S9AEeu90aG5z1x1A==" - ], - "Content-Length": [ - "740" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8127eb74-c372-11e9-9d93-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:kqibZJeMVRAejeyXRhBLMMuMCMXwhaPBLynhWJwWyFk=" - ] - } - }, - 
"response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259665628AED\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "27HmU8S9AEeu90aG5z1x1A==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81313b92-c372-11e9-890f-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:JdxZIXYV2umwVb2BkNy0GmB/ay78/GiWSKm1uUe8W0o=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de6fe-401e-00ac-247f-574597000000\nTime:2019-08-20T17:46:57.3033597Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-unsupportedProperty.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "813903c2-c372-11e9-b043-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:puj+aKP8+0gxbZAqQnUt8D9XY5YZe+UbYqIhKuDgeCI=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de714-401e-00ac-397f-574597000000\nTime:2019-08-20T17:46:57.3764113Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-unsupportedProperty.json", - "body": "{\r\n \"templateInfo\": {\r\n \"description\": \"A test application template that specifies the unsupported properties'fluxCapacitorModel' and 'vehicleMarque'.\"\r\n },\r\n \"fluxCapacitorModel\": \"DocBrown55\",\r\n \"vehicleMarque\": \"deLorean\"\r\n}\r\n\r\n\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.2540834" - ], - "Content-MD5": [ - "eSn7zZA04to5Rccq3nxw1A==" - ], - "Content-Length": [ - "240" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8144c26c-c372-11e9-8e11-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:WlzjrlzCx8rM/moNwAuOR19YCfKtQCdx+lTm+NKJlE4=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596657F3FDC\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-MD5": [ - "eSn7zZA04to5Rccq3nxw1A==" - ], - "Content-Length": [ - 
"0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "814dea2e-c372-11e9-97ef-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:+nZzWhxwZ1zYhQQzQuddEu/syDaFhuqNrYEARJyzw+U=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:56 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de745-401e-00ac-657f-574597000000\nTime:2019-08-20T17:46:57.4904920Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-untypedParameter.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8155aff6-c372-11e9-8a35-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:FG2OfkFLZ7szN33Qze4KaZ7+IGfOCCcDB7/4QhUqRfo=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de753-401e-00ac-737f-574597000000\nTime:2019-08-20T17:46:57.5455314Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-untypedParameter.json", - "body": "{\r\n \"templateMetadata\": {\r\n \"description\": \"A test application template that declares a property with no specified type.\"\r\n },\r\n \"jobManagerTask\": {\r\n \"id\":\"mytask1\",\r\n \"commandLine\":\"myprogram.exe\",\r\n \"resourceFiles\": [ {\r\n \"httpUrl\":\"http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d\",\r\n \"filePath\":\"myprogram.exe\"\r\n },\r\n {\r\n \"httpUrl\":\"http://mystorage1.blob.core.windows.net/scripts/test.txt?st=2013-08-09T08%3a49%3a37.0000000Z&se=2013-08-10T08%3a49%3a37.0000000Z&sr=c&sp=d&si=YWJjZGTVMZw%3d%3d&sig= %2bSzBm0wi8xECuGkKw97wnkSZ%2f62sxU%2b6Hq6a7qojIVE%3d\",\r\n \"filePath\":\"[parameters('blobName')]\"\r\n } ],\r\n \"environmentSettings\": [ {\r\n \"name\":\"myvariable\",\r\n \"value\":\"myvalue\"\r\n } ],\r\n \"constraints\": {\r\n \"maxWallClockTime\":\"PT1H\",\r\n \"maxTaskRetryCount\":0,\r\n \"retentionTime\":\"PT1H\"\r\n },\r\n \"killJobOnCompletion\":false,\r\n \"runElevated\":false,\r\n \"runExclusive\":true\r\n },\r\n \"metadata\": [ {\r\n \"name\":\"myproperty\",\r\n \"value\":\"[parameters('keyValue')]\"\r\n } ],\r\n \"parameters\": {\r\n \"blobName\" : {\r\n \"defaultValue\": \"name\"\r\n },\r\n 
\"keyValue\" : {\r\n \"type\": \"string\"\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.0829573" - ], - "Content-MD5": [ - "sWJuTwpMQ9cWToECYRCNiQ==" - ], - "Content-Length": [ - "1363" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "815ed718-c372-11e9-a956-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:vt5gmV/PO3c8e7ibn7aXQ5D9Fn7lhuyS/R/GIfI80aA=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259665993529\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-MD5": [ - "sWJuTwpMQ9cWToECYRCNiQ==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8167f09a-c372-11e9-86ed-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:BYZqkMgpVii/0xt+fCoQXTlCxclIs64US79BIfNni6w=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de76e-401e-00ac-0d7f-574597000000\nTime:2019-08-20T17:46:57.6676173Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.mergetask.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81709a14-c372-11e9-b736-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:qRJqHxX1XyE4sxXdvs1Nnnj6Jr+pT4Fv3BLukoXzSj8=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de777-401e-00ac-167f-574597000000\nTime:2019-08-20T17:46:57.7226567Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.mergetask.json", - "body": "{\r\n \"parameters\": {\r\n \"jobId\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"helloworld-job3\",\r\n \"metadata\": {\r\n \"description\": \"The id of Azure Batch job\"\r\n }\r\n },\r\n \"poolId\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"helloworld-pool3\",\r\n \"metadata\": {\r\n \"description\": \"The id of Azure Batch pool which runs the job\"\r\n }\r\n },\r\n \"vmSize\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"STANDARD_D1_V2\",\r\n \"metadata\": {\r\n \"description\": \"The size of the virtual machines that run the application\"\r\n }\r\n },\r\n \"vmCount\": {\r\n \"type\": \"int\",\r\n \"defaultValue\": 1,\r\n \"metadata\": {\r\n \"description\": \"The number of virtual machines\"\r\n }\r\n },\r\n \"testData\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"in\",\r\n \"metadata\": {\r\n \"description\": \"The auto-storage group where the input data is stored\"\r\n }\r\n },\r\n \"outputData\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"output\",\r\n \"metadata\": {\r\n \"description\": \"The auto-storage group where the output data is uploaded\"\r\n }\r\n }\r\n },\r\n \"variables\": {\r\n \"osType\": {\r\n \"publisher\": \"Canonical\",\r\n \"offer\": \"UbuntuServer\",\r\n \"sku\": \"16.04.0-LTS\",\r\n \"version\": \"latest\"\r\n }\r\n },\r\n \"job\": {\r\n \"type\": 
\"Microsoft.Batch/batchAccounts/jobs\",\r\n \"apiVersion\": \"2018-12-01\",\r\n \"properties\": {\r\n \"id\": \"[parameters('jobId')]\",\r\n \"onAllTasksComplete\": \"terminateJob\",\r\n \"poolInfo\": {\r\n \"autoPoolSpecification\": {\r\n \"autoPoolIdPrefix\": \"[parameters('poolId')]\",\r\n \"poolLifetimeOption\": \"job\",\r\n \"keepAlive\": false,\r\n \"pool\": {\r\n \"vmSize\": \"[parameters('vmSize')]\",\r\n \"virtualMachineConfiguration\": {\r\n \"imageReference\": \"[variables('osType')]\",\r\n \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"\r\n },\r\n \"targetDedicatedNodes\": \"[parameters('vmCount')]\"\r\n }\r\n }\r\n },\r\n \"taskFactory\": {\r\n \"type\": \"taskPerFile\",\r\n \"source\" : {\r\n \"fileGroup\" : \"[parameters('testData')]\"\r\n },\r\n \"repeatTask\": {\r\n \"commandLine\": \"/bin/bash -c 'cat {fileName}'\",\r\n \"resourceFiles\": [\r\n {\r\n \"httpUrl\" : \"{url}\",\r\n \"filePath\" : \"{fileName}\"\r\n }\r\n ],\r\n \"outputFiles\": [\r\n {\r\n \"filePattern\": \"**/stdout.txt\",\r\n \"destination\": {\r\n \"autoStorage\": {\r\n \"path\": \"output-{fileName}\",\r\n \"fileGroup\": \"[parameters('outputData')]\"\r\n }\r\n },\r\n \"uploadOptions\": {\r\n \"uploadCondition\": \"TaskSuccess\"\r\n }\r\n }\r\n ]\r\n },\r\n \"mergeTask\" : {\r\n \"displayName\": \"myMergeTask\",\r\n \"commandLine\": \"/bin/bash -c 'ls'\",\r\n \"resourceFiles\": [\r\n {\r\n \"autoStorageContainerName\": \"fgrp-[parameters('outputData')]\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n}\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1552419189.7367494" - ], - "Content-MD5": [ - "2ILRwlJk1kyfaTTP253tiA==" - ], - "Content-Length": [ - "4072" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81799c10-c372-11e9-a794-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 
17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:HiIiePiYS4ozAlO534lkkMl341I+OmeS1+/lkRmnACY=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259665B43C13\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-MD5": [ - "2ILRwlJk1kyfaTTP253tiA==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81830890-c372-11e9-bbd3-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:ENnOAE8WLYXq8+5XuZ2jB5g/lqfjVteRU2nwOFysp4k=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de792-401e-00ac-2f7f-574597000000\nTime:2019-08-20T17:46:57.8397390Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parameters.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "818b1362-c372-11e9-aee9-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:ebzKMRM9P3zAIF/g/BQhdOxLuZnO5JHkhdw6Q4aBqH4=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de7a1-401e-00ac-3d7f-574597000000\nTime:2019-08-20T17:46:57.8947784Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parameters.json", - "body": "{\r\n \"jobId\": {\r\n \"value\": \"helloworld\"\r\n },\r\n \"poolId\": {\r\n \"value\": \"xplatTestPool\"\r\n },\r\n \"outputFileStorageUrl\": {\r\n \"value\": \"\"\r\n },\r\n \"taskStart\": {\r\n \"value\": 1\r\n },\r\n \"taskEnd\": {\r\n \"value\": 3\r\n } \r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.2550833" - ], - "Content-MD5": [ - "acLZykn1NMEO1oxenbC6dw==" - ], - "Content-Length": [ - "254" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8193e32e-c372-11e9-91af-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:vwJrUdNT+37UkMIrHd04MJDxS8H+Bt9eN/6Du8s5+ms=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259665CE7F92\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-MD5": [ - "acLZykn1NMEO1oxenbC6dw==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { 
- "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "819d3200-c372-11e9-9696-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:GrHTIYcck903br+PBymwklMeomrK0OqTSeXYDnsHlsk=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de7d4-401e-00ac-697f-574597000000\nTime:2019-08-20T17:46:58.0108609Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parametricsweep.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81a507e8-c372-11e9-bacc-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:smSKYxOG/nW2OQlNWK58MMNO2NPikBLrx/uhIqw4Sr0=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de805-401e-00ac-087f-574597000000\nTime:2019-08-20T17:46:58.0669001Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parametricsweep.json", - "body": "{\r\n \"parameters\": {\r\n \"inputFileGroup\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"convert_data\",\r\n \"metadata\": {\r\n \"description\": \"The auto-storage group where the input data is stored\"\r\n }\r\n },\r\n \"outputFileStorageUrl\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The SAS URL for a container where outputs will be stored\"\r\n }\r\n },\r\n \"inputType\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"wav\",\r\n \"metadata\": {\r\n \"description\": \"The extension of the input data\"\r\n }\r\n },\r\n \"poolId\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"ffmpeg-pool\",\r\n \"metadata\": {\r\n \"description\": \"The id of Azure Batch pool which runs the job\"\r\n }\r\n },\r\n \"jobId\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The id of Azure Batch job\"\r\n }\r\n },\r\n \"taskStart\": {\r\n \"type\": \"int\",\r\n \"metadata\": {\r\n \"description\": \"The sweep start parameter\"\r\n }\r\n },\r\n \"taskEnd\": {\r\n \"type\": \"int\",\r\n \"metadata\": {\r\n \"description\": \"The sweep end parameter\"\r\n }\r\n }\r\n },\r\n \"job\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/jobs\",\r\n \"apiVersion\": \"2018-12-01\",\r\n \"properties\": {\r\n \"id\": \"[parameters('jobId')]\",\r\n \"constraints\": {\r\n \"maxWallClockTime\": \"PT5H\",\r\n 
\"maxTaskRetryCount\": 1\r\n },\r\n \"poolInfo\": {\r\n \"poolId\": \"[parameters('poolId')]\"\r\n },\r\n \"taskFactory\": {\r\n \"type\": \"parametricSweep\",\r\n \"parameterSets\": [\r\n {\r\n \"start\": \"[parameters('taskStart')]\",\r\n \"end\": \"[parameters('taskEnd')]\",\r\n \"step\": 1\r\n }\r\n ],\r\n \"repeatTask\": {\r\n \"commandLine\": \"ffmpeg -y -i sample{0}.[parameters('inputType')] -acodec libmp3lame output.mp3\",\r\n \"resourceFiles\": [\r\n {\r\n \"source\": { \r\n \"fileGroup\": \"[parameters('inputFileGroup')]\",\r\n \"prefix\": \"sample{0}.[parameters('inputType')]\"\r\n }\r\n }\r\n ],\r\n \"outputFiles\": [\r\n {\r\n \"filePattern\": \"output.mp3\",\r\n \"destination\": {\r\n \"container\": {\r\n \"path\": \"audio{0}.mp3\",\r\n \"containerUrl\": \"[parameters('outputFileStorageUrl')]\"\r\n }\r\n },\r\n \"uploadOptionsa\": {\r\n \"uploadCondition\": \"TaskSuccess\"\r\n }\r\n }\r\n ],\r\n \"packageReferences\": [\r\n {\r\n \"type\": \"aptPackage\",\r\n \"id\": \"ffmpeg\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n }\r\n}\r\n", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.1117144" - ], - "Content-MD5": [ - "BvsOoLG3cYJ873sw8nI4/Q==" - ], - "Content-Length": [ - "3565" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81ae3462-c372-11e9-a5ed-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:d7pt1RTWtfUce12jQQYyo9nnE7orsW5BDmLW6320GGE=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259665E89BF1\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 
Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-MD5": [ - "BvsOoLG3cYJ873sw8nI4/Q==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81b7f852-c372-11e9-a2c4-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:sJ0htrNRkNxVYOXFRHhjUglkUW2Y09UfdS+iZdomGOg=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de865-401e-00ac-567f-574597000000\nTime:2019-08-20T17:46:58.1879853Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.resourcefile-legacy.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81c00e9e-c372-11e9-8e87-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:UlRP30gLyVD7Mw7bMg7Eg74qJ8H+jJVCO9x8Ix8F+fY=" - ] - } - }, - 
"response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de883-401e-00ac-6d7f-574597000000\nTime:2019-08-20T17:46:58.2460268Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.resourcefile-legacy.json", - "body": "{\r\n \"job\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/jobs\",\r\n \"apiVersion\": \"2018-12-01\",\r\n \"properties\": {\r\n \"id\": \"job123\",\r\n \"onAllTasksComplete\": \"terminateJob\",\r\n \"poolInfo\": {\r\n \"autoPoolSpecification\": {\r\n \"autoPoolIdPrefix\": \"pool123\",\r\n \"poolLifetimeOption\": \"job\",\r\n \"keepAlive\": false,\r\n \"pool\": {\r\n \"vmSize\": \"STANDARD_D1_V2\",\r\n \"virtualMachineConfiguration\": {\r\n \"imageReference\": {\r\n \"publisher\": \"Canonical\",\r\n \"offer\": \"UbuntuServer\",\r\n \"sku\": \"16.04.0-LTS\",\r\n \"version\": \"latest\"\r\n },\r\n \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"\r\n },\r\n \"targetDedicatedNodes\": \"1\"\r\n }\r\n }\r\n },\r\n \"taskFactory\": {\r\n \"type\": \"taskCollection\",\r\n \"tasks\": [\r\n {\r\n \"id\": \"1\",\r\n \"commandLine\": \"/bin/bash -c 'cat {fileName}'\",\r\n \"resourceFiles\": [\r\n {\r\n \"httpUrl\": \"https://testacct.blob.core.windows.net/\",\r\n \"filePath\": \"location\"\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - 
"1566231778.0575619" - ], - "Content-MD5": [ - "9iVvz/Pf/FsbJv0miSuvzw==" - ], - "Content-Length": [ - "1245" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81c90f4a-c372-11e9-8b60-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:04WI+EvqlU97Q+P7w/MrRu0A14cAzqJdTzBL5N4Bipc=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259666041824\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-MD5": [ - "9iVvz/Pf/FsbJv0miSuvzw==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81d2cf48-c372-11e9-8f39-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:XB6Aa5jJzu0j4BMfQBmzCsliJE1VV971w6N6G2HxNuA=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de8a6-401e-00ac-0a7f-574597000000\nTime:2019-08-20T17:46:58.3681127Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.apiversionfail.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81dba8e8-c372-11e9-ab63-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:EAhUSZGNzbaJVrU9u6AyrO9mlrk/wixV7l7LLEJbSKA=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:57 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de8be-401e-00ac-197f-574597000000\nTime:2019-08-20T17:46:58.4241524Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.apiversionfail.json", - "body": "{\r\n \"parameters\": {\r\n \"jobId\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"ffmpegpool\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch pool which runs the job\"\r\n }\r\n },\r\n \"poolId\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch job\"\r\n }\r\n }\r\n },\r\n \"job\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/jobs\",\r\n \"apiVersion\": \"2030-12-01\",\r\n \"properties\": {\r\n \"fob\": \"[parameters('jobId')]\",\r\n \"ks\": {\r\n \"poolId\": \"[parameters('poolId')]\"\r\n },\r\n \"ls\": {\r\n \"type\": \"taskCollection\",\r\n \"tasks\": [\r\n {\r\n \"id\" : \"mytask1\",\r\n \"commandLine\": \"cmd /c echo hello1\"\r\n },\r\n {\r\n \"id\" : \"mytask2\",\r\n \"commandLine\": \"cmd /c echo hello2\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.1238391" - ], - "Content-MD5": [ - "kPzKWo4J2zRaerzJw2Z2xg==" - ], - "Content-Length": [ - "1140" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81e4e49c-c372-11e9-b4ab-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - 
"SharedKey sdkteststore2:bISP5Ol4D+TPYD0ta5Ybw17Ebd8GxbauTubj1MKisb4=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596661F1F0E\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "kPzKWo4J2zRaerzJw2Z2xg==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81f05674-c372-11e9-a484-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:oyLddcBj+iVceEOZoqE3E9y2bf2OZP1jYzdLZEZGULA=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de8f5-401e-00ac-4e7f-574597000000\nTime:2019-08-20T17:46:58.5572469Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "81f86ca2-c372-11e9-a77b-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:tnQy9IhNBOzKHrDR7Ott+CpU/7ZUjiCDrKMkr7GybVQ=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de91a-401e-00ac-707f-574597000000\nTime:2019-08-20T17:46:58.6142873Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.json", - "body": "{\r\n \"parameters\": {\r\n \"jobId\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"ffmpegpool\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch pool which runs the job\"\r\n }\r\n },\r\n \"poolId\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch job\"\r\n }\r\n }\r\n },\r\n \"job\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/jobs\",\r\n \"properties\": {\r\n \"id\": \"[parameters('jobId')]\",\r\n \"poolInfo\": {\r\n \"poolId\": \"[parameters('poolId')]\"\r\n },\r\n \"taskFactory\": {\r\n \"type\": \"taskCollection\",\r\n \"tasks\": [\r\n {\r\n \"id\" : \"mytask1\",\r\n \"commandLine\": \"cmd /c echo hello1\"\r\n },\r\n {\r\n \"id\" : \"mytask2\",\r\n \"commandLine\": \"cmd /c echo hello2\"\r\n }\r\n ]\r\n }\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.1388402" - ], - "Content-MD5": [ - "rdYsR2RLvOnr9kOYg2y5NQ==" - ], - "Content-Length": [ - "1117" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8201bc02-c372-11e9-9adf-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey 
sdkteststore2:bY5Gtj+1Ra4tusu05O2bYhKeXxyAH8Om4nXmhARWVo4=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596663C2230\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "rdYsR2RLvOnr9kOYg2y5NQ==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "820ac286-c372-11e9-9f17-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:LFWBY+5I/pKxw3MmBv+WIQjVyfrOu1ZfCVOo0lDd/IQ=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de94f-401e-00ac-1f7f-574597000000\nTime:2019-08-20T17:46:58.7313701Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.parameters.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "82130300-c372-11e9-b03b-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:UioNHQOJD7oL1mMPbk48X5r7euTVzPox7yr4JT2lZbo=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de95a-401e-00ac-287f-574597000000\nTime:2019-08-20T17:46:58.7884104Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.parameters.json", - "body": "{\r\n \"poolName\": {\r\n \"value\": \"testpool1\"\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.2550833" - ], - "Content-MD5": [ - "UJHCaZ8IYHwM3l1BfEkTHQ==" - ], - "Content-Length": [ - "52" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "821c4634-c372-11e9-8fe6-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:+tgEQGeVKMlMIhqlmsiu4czySuXJILtsTvXGNrsXZNA=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596665665A6\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "UJHCaZ8IYHwM3l1BfEkTHQ==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - 
"Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "822517cc-c372-11e9-870a-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:CZn13JpC0XnXhV+3xEFUlxobODKO24NbyKJobG3qpOg=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de97a-401e-00ac-407f-574597000000\nTime:2019-08-20T17:46:58.9054928Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.apiversionfail.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "822d99ca-c372-11e9-a34e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:JPdXM45kJJwrdzLYkONNHPwo2NkL9KECdauuLyTdBtA=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de992-401e-00ac-547f-574597000000\nTime:2019-08-20T17:46:58.9605317Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.apiversionfail.json", - "body": "{\r\n \"parameters\": {\r\n \"vmSize\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The size of the virtual machines that runs the application\"\r\n },\r\n \"defaultValue\": \"STANDARD_D1\",\r\n \"allowedValues\": [\r\n \"STANDARD_A1\",\r\n \"STANDARD_A2\",\r\n \"STANDARD_A3\",\r\n \"STANDARD_A4\",\r\n \"STANDARD_D1\",\r\n \"STANDARD_D2\",\r\n \"STANDARD_D3\",\r\n \"STANDARD_D4\"\r\n ]\r\n },\r\n \"vmCount\": {\r\n \"type\": \"int\",\r\n \"defaultValue\": 3,\r\n \"metadata\": {\r\n \"description\": \"The number of the virtual machines\"\r\n }\r\n },\r\n \"poolName\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"ffmpegpool\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch pool\"\r\n }\r\n }\r\n },\r\n \"variables\": {\r\n \"osType\": {\r\n \"publisher\": \"Canonical\",\r\n \"offer\": \"UbuntuServer\",\r\n \"sku\": \"15.10\",\r\n \"version\": \"latest\"\r\n }\r\n }, \r\n \"pool\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/pools\",\r\n \"apiVersion\": \"2030-12-01\",\r\n \"properties\": {\r\n \"ls\": \"[parameters('poolName')]\",\r\n \"fob\": {\r\n \"imageReference\": \"[variables('osType')]\",\r\n \"nodeAgentSKUId\": \"batch.node.debian 8\"\r\n },\r\n \"new\": \"[parameters('vmSize')]\",\r\n \"vmCount\": \"[parameters('vmCount')]\",\r\n \"enableAutoScale\": false\r\n }\r\n }\r\n}", - 
"headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.1398392" - ], - "Content-MD5": [ - "Vx+oh7HSZQSJ/EMS4CWYdQ==" - ], - "Content-Length": [ - "1685" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "82367d46-c372-11e9-bd92-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:D6j6/po+OnhhMT07uTIVXop0EhsKI/dWKJtHZqwCsKE=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D725966671457D\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "Vx+oh7HSZQSJ/EMS4CWYdQ==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "823fdf88-c372-11e9-a487-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:ekw/OceRkjtyG7lgDjjFg7Mp8gFyWI+pNVvB30bYlT8=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8de9d4-401e-00ac-0c7f-574597000000\nTime:2019-08-20T17:46:59.0836196Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8248b924-c372-11e9-a7f3-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:dhBH8ZtJZrANHlnhWRCMLOfN9vaKXaWHM7Thnoxr9Jc=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8de9ea-401e-00ac-1e7f-574597000000\nTime:2019-08-20T17:46:59.1416607Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.json", - "body": "{\r\n \"parameters\": {\r\n \"vmSize\": {\r\n \"type\": \"string\",\r\n \"metadata\": {\r\n \"description\": \"The size of the virtual machines that runs the application\"\r\n },\r\n \"defaultValue\": \"STANDARD_D1\",\r\n \"allowedValues\": [\r\n \"STANDARD_A1\",\r\n \"STANDARD_A2\",\r\n \"STANDARD_A3\",\r\n \"STANDARD_A4\",\r\n \"STANDARD_D1\",\r\n \"STANDARD_D2\",\r\n \"STANDARD_D3\",\r\n \"STANDARD_D4\"\r\n ]\r\n },\r\n \"vmCount\": {\r\n \"type\": \"int\",\r\n \"defaultValue\": 3,\r\n \"metadata\": {\r\n \"description\": \"The number of the virtual machines\"\r\n }\r\n },\r\n \"poolName\": {\r\n \"type\": \"string\",\r\n \"defaultValue\": \"ffmpegpool\",\r\n \"metadata\": {\r\n \"description\": \"The name of Azure Batch pool\"\r\n }\r\n }\r\n },\r\n \"variables\": {\r\n \"osType\": {\r\n \"publisher\": \"Canonical\",\r\n \"offer\": \"UbuntuServer\",\r\n \"sku\": \"15.10\",\r\n \"version\": \"latest\"\r\n }\r\n }, \r\n \"pool\": {\r\n \"type\": \"Microsoft.Batch/batchAccounts/pools\",\r\n \"properties\": {\r\n \"id\": \"[parameters('poolName')]\",\r\n \"virtualMachineConfiguration\": {\r\n \"imageReference\": \"[variables('osType')]\",\r\n \"nodeAgentSKUId\": \"batch.node.debian 8\"\r\n },\r\n \"vmSize\": \"[parameters('vmSize')]\",\r\n \"vmCount\": \"[parameters('vmCount')]\",\r\n \"enableAutoScale\": false\r\n }\r\n }\r\n}", - "headers": { - 
"User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1551725410.1458392" - ], - "Content-MD5": [ - "sLKLnAZ4LgaLVKq3l8hvhw==" - ], - "Content-Length": [ - "1675" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "82525618-c372-11e9-84ff-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:EARC47wmD6K78Xf2aLF74ONWXcvPVO/8Ay9A4EALlxM=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D72596668D8528\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "sLKLnAZ4LgaLVKq3l8hvhw==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "825c4118-c372-11e9-b559-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:803MP01txGMeSWWdWQYX6fRajrQU2TYHk4/T8wPPipQ=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:1b8dea0f-401e-00ac-407f-574597000000\nTime:2019-08-20T17:46:59.2627459Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.resourcefile-legacy.json?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "82640948-c372-11e9-bd15-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:kRH7irBX3VIpPr0DdP/H18gqyCrnWUTk6ugdwfUfmYo=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:1b8dea19-401e-00ac-477f-574597000000\nTime:2019-08-20T17:46:59.3217877Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.resourcefile-legacy.json", - "body": "{\r\n \"pool\": {\r\n \"id\": \"blobsource1\",\r\n \"displayName\": \"Blender Ubuntu standard pool\",\r\n \"vmSize\": \"Standard_D1_v2\",\r\n \"virtualMachineConfiguration\": {\r\n \"imageReference\": {\r\n \"publisher\": \"Canonical\",\r\n \"offer\" : \"UbuntuServer\",\r\n \"sku\": \"16.04.0-LTS\",\r\n \"version\": \"latest\"\r\n },\r\n \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"\r\n },\r\n \"targetDedicatedNodes\": \"1\",\r\n \"targetLowPriorityNodes\": \"0\",\r\n \"enableAutoScale\": false,\r\n \"startTask\": {\r\n \"commandLine\": \"sleep 1\",\r\n \"waitForSuccess\": true,\r\n \"maxTaskRetryCount\": 0,\r\n \"userIdentity\": {\r\n \"autoUser\": {\r\n \"scope\": \"pool\",\r\n \"elevationLevel\": \"admin\"\r\n }\r\n },\r\n \"resourceFiles\": [\r\n {\r\n \"blobSource\": \"https://raw.githubusercontent.com/Azure/BatchExplorer-data/master/ncj/blender/scripts/setup-linux-pool.sh\",\r\n \"filePath\": \"setup-linux-pool.sh\"\r\n }\r\n ]\r\n }\r\n }\r\n}", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1566231778.058535" - ], - "Content-MD5": [ - "8QUSX+KCWV/drjPRRW4cpA==" - ], - "Content-Length": [ - "990" - ], - "x-ms-version": [ - "2017-07-29" - ], - 
"x-ms-client-request-id": [ - "826d1e1a-c372-11e9-9e88-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:i8cXonXbgca6hMBplsXz3LdRX5FXBYKBXy+ydBhdLPM=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-version": [ - "2017-07-29" - ], - "ETag": [ - "\"0x8D7259666A88C11\"" - ], - "x-ms-request-server-encrypted": [ - "true" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:58 GMT" - ], - "Content-MD5": [ - "8QUSX+KCWV/drjPRRW4cpA==" - ], - "Content-Length": [ - "0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Batch/batchAccounts?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "82998366-c372-11e9-9e3b-44032c851686" - 
], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "content-length": [ - "2729" - ], - "x-ms-original-request-ids": [ - "3b657330-ec08-41c6-8ef2-7e18f8e8b064", - "4f86fc36-4ea9-4941-b558-9768b236889e" - ], - "Cache-Control": [ - "no-cache" - ], - "Pragma": [ - "no-cache" - ] - }, - "body": { - "string": "{\"value\":[{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/byossc\",\"name\":\"byossc\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"southcentralus\",\"properties\":{\"accountEndpoint\":\"byossc.southcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"poolAllocationMode\":\"UserSubscription\",\"keyVaultReference\":{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.KeyVault/vaults/byossc\",\"url\":\"https://byossc.vault.azure.net/\"}}},{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},
{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}]}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - 
"User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq
_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - "x-ms-client-request-id": [ - "83058346-c372-11e9-90b3-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json" - ], - "Expires": [ - "-1" - ], - "Vary": [ - "Accept-Encoding" - ], - "Date": [ - "Tue, 20 Aug 2019 17:46:59 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Cache-Control": [ - "no-cache" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1199" - ], - "Pragma": [ - "no-cache" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-in?restype=container&comp=list", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "8342107a-c372-11e9-9429-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:47:00 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:vTeL6mjkIJba50FLFSeujlnzlhMOEpcAd9H/4SmCVII=" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - 
"application/xml" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Date": [ - "Tue, 20 Aug 2019 17:47:00 GMT" - ] - }, - "body": { - "string": "\ufeffbatch-applicationTemplate-parameters.jsonTue, 20 Aug 2019 17:46:56 GMT0x8D7259664CFA65D1377application/octet-streamGhvIqLxdhgZSxhi6CdQjLQ==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-prohibitedApplicationTemplateInfo.jsonTue, 20 Aug 2019 17:46:56 GMT0x8D7259664F3133A219application/octet-streamxbCZcjm1pOMcwR8Td2yo9w==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-prohibitedId.jsonTue, 20 Aug 2019 17:46:56 GMT0x8D72596650DA4E3146application/octet-streamHBCHz/rBYi8V9ILMKx0o+g==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-prohibitedPoolInfo.jsonTue, 20 Aug 2019 17:46:56 GMT0x8D72596652884BA187application/octet-streamPFiBkLMhFseOyDvKgJXaRA==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-prohibitedPriority.jsonTue, 20 Aug 2019 17:46:57 GMT0x8D72596654783FB206application/octet-streamIvRrVHIc/lLy/wSkE22LeA==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-static.jsonTue, 20 Aug 2019 17:46:57 GMT0x8D7259665628AED740application/octet-stream27HmU8S9AEeu90aG5z1x1A==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-unsupportedProperty.jsonTue, 20 Aug 2019 17:46:57 GMT0x8D72596657F3FDC240application/octet-streameSn7zZA04to5Rccq3nxw1A==BlockBlobHottrueunlockedavailabletruebatch-applicationTemplate-untypedParameter.jsonTue, 20 Aug 2019 17:46:57 GMT0x8D72596659935291363application/octet-streamsWJuTwpMQ9cWToECYRCNiQ==BlockBlobHottrueunlockedavailabletruebatch.job.mergetask.jsonTue, 20 Aug 2019 17:46:57 GMT0x8D7259665B43C134072application/octet-stream2ILRwlJk1kyfaTTP253tiA==BlockBlobHottrueunlockedavailabletruebatch.job.parameters.jsonTue, 20 Aug 2019 17:46:57 
GMT0x8D7259665CE7F92254application/octet-streamacLZykn1NMEO1oxenbC6dw==BlockBlobHottrueunlockedavailabletruebatch.job.parametricsweep.jsonTue, 20 Aug 2019 17:46:58 GMT0x8D7259665E89BF13565application/octet-streamBvsOoLG3cYJ873sw8nI4/Q==BlockBlobHottrueunlockedavailabletruebatch.job.resourcefile-legacy.jsonTue, 20 Aug 2019 17:46:58 GMT0x8D72596660418241245application/octet-stream9iVvz/Pf/FsbJv0miSuvzw==BlockBlobHottrueunlockedavailabletruebatch.job.simple.apiversionfail.jsonTue, 20 Aug 2019 17:46:58 GMT0x8D72596661F1F0E1140application/octet-streamkPzKWo4J2zRaerzJw2Z2xg==BlockBlobHottrueunlockedavailabletruebatch.job.simple.jsonTue, 20 Aug 2019 17:46:58 GMT0x8D72596663C22301117application/octet-streamrdYsR2RLvOnr9kOYg2y5NQ==BlockBlobHottrueunlockedavailabletruebatch.pool.parameters.jsonTue, 20 Aug 2019 17:46:58 GMT0x8D72596665665A652application/octet-streamUJHCaZ8IYHwM3l1BfEkTHQ==BlockBlobHottrueunlockedavailabletruebatch.pool.simple.apiversionfail.jsonTue, 20 Aug 2019 17:46:59 GMT0x8D725966671457D1685application/octet-streamVx+oh7HSZQSJ/EMS4CWYdQ==BlockBlobHottrueunlockedavailabletruebatch.pool.simple.jsonTue, 20 Aug 2019 17:46:59 GMT0x8D72596668D85281675application/octet-streamsLKLnAZ4LgaLVKq3l8hvhw==BlockBlobHottrueunlockedavailabletruebatch.pool.simple.resourcefile-legacy.jsonTue, 20 Aug 2019 17:46:59 GMT0x8D7259666A88C11990application/octet-stream8QUSX+KCWV/drjPRRW4cpA==BlockBlobHottrueunlockedavailabletrue" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-output?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "836bfcc8-c372-11e9-8fca-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:47:01 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:b0bewNaXwslaFD2Ok2av6z7YJqQSlDTVLu9HAmesEAI=" - ], 
- "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." - }, - "headers": { - "Content-Type": [ - "application/xml" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "Date": [ - "Tue, 20 Aug 2019 17:47:00 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:b37acef4-001e-0045-2a7f-57b991000000\nTime:2019-08-20T17:47:01.0495638Z" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs?api-version=2019-08-01.10.0&timeout=30", - "body": "{\"id\": \"helloworld-job3\", \"poolInfo\": {\"autoPoolSpecification\": {\"autoPoolIdPrefix\": \"helloworld-pool3\", \"poolLifetimeOption\": \"job\", \"keepAlive\": false, \"pool\": {\"vmSize\": \"STANDARD_D1_V2\", \"virtualMachineConfiguration\": {\"imageReference\": {\"publisher\": \"Canonical\", \"offer\": \"UbuntuServer\", \"sku\": \"16.04.0-LTS\", \"version\": \"latest\"}, \"nodeAgentSKUId\": \"batch.node.ubuntu 16.04\"}, \"targetDedicatedNodes\": 1}}}, \"onAllTasksComplete\": \"noaction\", \"usesTaskDependencies\": true}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "8374fd80-c372-11e9-baed-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "return-client-request-id": [ - "false" - ], - "Content-Length": [ - "476" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Date": [ - "Tue, 20 Aug 2019 17:47:00 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "ETag": [ - "0x8D7259667CE5072" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:47:01 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Location": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/job-1" - ], - "Transfer-Encoding": [ - "chunked" - ], - "request-id": [ - "fa5e7cfa-07c4-43e0-8fec-8d1349635ceb" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/addtaskcollection?api-version=2019-08-01.10.0", - "body": "{\"value\": [{\"id\": \"merge\", \"displayName\": \"myMergeTask\", \"commandLine\": \"/bin/bash -c 'ls'\", \"resourceFiles\": [{\"autoStorageContainerName\": \"fgrp-output\"}], \"dependsOn\": {\"taskIdRanges\": [{\"start\": 0, \"end\": 17}]}}, {\"id\": \"17\", \"commandLine\": \"/bin/bash -c 'cat batch.pool.simple.resourcefile-legacy.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.resourcefile-legacy.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=qjkdzdBo7dp66dBpwq1MzHYDaUFOkLn2OBE7PJVR8/o%3D\", \"filePath\": \"batch.pool.simple.resourcefile-legacy.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.pool.simple.resourcefile-legacy.json\", \"containerUrl\": 
\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"16\", \"commandLine\": \"/bin/bash -c 'cat batch.pool.simple.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=5ggJXnam3lSRp5pN29cQ3s3M2%2B6K4elM6ZTK7TBjy0U%3D\", \"filePath\": \"batch.pool.simple.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.pool.simple.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"15\", \"commandLine\": \"/bin/bash -c 'cat batch.pool.simple.apiversionfail.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.simple.apiversionfail.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=FF66MuydtAug8VyNO/jJmorJ7%2B%2BTtcfDqgeQTJFJr/k%3D\", \"filePath\": \"batch.pool.simple.apiversionfail.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.pool.simple.apiversionfail.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"14\", \"commandLine\": \"/bin/bash -c 'cat batch.pool.parameters.json'\", \"resourceFiles\": [{\"httpUrl\": 
\"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.pool.parameters.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=M2Z7ZLvIzdRYEf9%2BnZUCcA8ekhAnMy8hVAzJMBXJXF0%3D\", \"filePath\": \"batch.pool.parameters.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.pool.parameters.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"13\", \"commandLine\": \"/bin/bash -c 'cat batch.job.simple.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=zCEvBTq3iOotefeoSMGul54bLYIDOxgzn6C01kZACEA%3D\", \"filePath\": \"batch.job.simple.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.job.simple.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"12\", \"commandLine\": \"/bin/bash -c 'cat batch.job.simple.apiversionfail.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.simple.apiversionfail.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=brAyKdOfJVDB1uA0q7LlOVJNDIq3abJBPtYkIsScFzY%3D\", \"filePath\": \"batch.job.simple.apiversionfail.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": 
\"output-batch.job.simple.apiversionfail.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"11\", \"commandLine\": \"/bin/bash -c 'cat batch.job.resourcefile-legacy.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.resourcefile-legacy.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=w4OupUnlhDLNJQVa6ISGpE5IwvDNjCqu/6PCaAjQBsM%3D\", \"filePath\": \"batch.job.resourcefile-legacy.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.job.resourcefile-legacy.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"10\", \"commandLine\": \"/bin/bash -c 'cat batch.job.parametricsweep.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parametricsweep.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=A2vjvhldjW0QFavTNuWu5dAgwITtP/SjUQGAaRBE54Q%3D\", \"filePath\": \"batch.job.parametricsweep.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.job.parametricsweep.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"9\", \"commandLine\": 
\"/bin/bash -c 'cat batch.job.parameters.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.parameters.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=i%2BYgbj78cFrcKJOcXFIYHE3TQgorojM9zbHJbTbIT%2B4%3D\", \"filePath\": \"batch.job.parameters.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.job.parameters.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"8\", \"commandLine\": \"/bin/bash -c 'cat batch.job.mergetask.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch.job.mergetask.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=EJbQk0h1IsKo%2Bs5n4faamwDKNkGiTUNNJgUkq9LvCso%3D\", \"filePath\": \"batch.job.mergetask.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch.job.mergetask.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"7\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-untypedParameter.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-untypedParameter.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=l8eFtrkubdXTb2639IRmav77LwsaPg15nZ4hBUwRx0s%3D\", \"filePath\": \"batch-applicationTemplate-untypedParameter.json\"}], 
\"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-untypedParameter.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"6\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-unsupportedProperty.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-unsupportedProperty.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=2D57fqug%2B5bY617zxZvfKMXpm7tE6qbQkUYH/qhUeQs%3D\", \"filePath\": \"batch-applicationTemplate-unsupportedProperty.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-unsupportedProperty.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"5\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-static.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-static.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=fGvx1JC7jhfMydOrP91FmBDGRiTZ%2BRcXH5kch/XmsGU%3D\", \"filePath\": \"batch-applicationTemplate-static.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-static.json\", \"containerUrl\": 
\"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"4\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-prohibitedPriority.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPriority.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=uco6p/1Grz4Tot2vIUBxc6SLJLmrtMC1K/y6twl/xE8%3D\", \"filePath\": \"batch-applicationTemplate-prohibitedPriority.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-prohibitedPriority.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"3\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-prohibitedPoolInfo.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedPoolInfo.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=wdkt74joqZjY7wPQJFe8j5yq9uSK4WMLisQgQCSuZ6c%3D\", \"filePath\": \"batch-applicationTemplate-prohibitedPoolInfo.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-prohibitedPoolInfo.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": 
{\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"2\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-prohibitedId.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedId.json?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=r&sv=2017-07-29&sr=b&sig=60aoV/Dp48M1Y8hSOFaCSWtgB/PlhznKSjeBkh8jsbo%3D\", \"filePath\": \"batch-applicationTemplate-prohibitedId.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-prohibitedId.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"1\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-prohibitedApplicationTemplateInfo.json'\", \"resourceFiles\": [{\"httpUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-prohibitedApplicationTemplateInfo.json?st=2019-08-20T17%3A32%3A00Z&se=2019-08-27T17%3A47%3A00Z&sp=r&sv=2017-07-29&sr=b&sig=yX1pAuKK7y7V4vsUx5zmeUICpfXIONdalc8RnaygIvA%3D\", \"filePath\": \"batch-applicationTemplate-prohibitedApplicationTemplateInfo.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-prohibitedApplicationTemplateInfo.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}, {\"id\": \"0\", \"commandLine\": \"/bin/bash -c 'cat batch-applicationTemplate-parameters.json'\", \"resourceFiles\": [{\"httpUrl\": 
\"https://sdkteststore2.blob.core.windows.net/fgrp-in/batch-applicationTemplate-parameters.json?st=2019-08-20T17%3A32%3A00Z&se=2019-08-27T17%3A47%3A00Z&sp=r&sv=2017-07-29&sr=b&sig=Cf35OizojkYQSSce65zDM732hBEZ%2Bvbaz391CmNmHgw%3D\", \"filePath\": \"batch-applicationTemplate-parameters.json\"}], \"outputFiles\": [{\"filePattern\": \"**/stdout.txt\", \"destination\": {\"container\": {\"path\": \"output-batch-applicationTemplate-parameters.json\", \"containerUrl\": \"https://sdkteststore2.blob.core.windows.net/fgrp-output?st=2019-08-20T17%3A32%3A01Z&se=2019-08-27T17%3A47%3A01Z&sp=w&sv=2017-07-29&sr=c&sig=r3A76NHornQOLrIpsQEmKbrSRE6kjMfmfN7LhQ42BrY%3D\"}}, \"uploadOptions\": {\"uploadCondition\": \"tasksuccess\"}}]}]}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "839ca99e-c372-11e9-9861-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "14598" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "Content-Type": [ - "application/json;odata=minimalmetadata" - ], - "Date": [ - "Tue, 20 Aug 2019 17:47:00 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "request-id": [ - "655d112f-a224-4283-bec3-c78b1aebd4eb" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "{\r\n \"odata.metadata\":\"https://sdktest2.westcentralus.batch.azure.com/$metadata#taskaddresult\",\"value\":[\r\n {\r\n \"status\":\"Success\",\"taskId\":\"merge\",\"eTag\":\"0x8D7259667F84623\",\"lastModified\":\"2019-08-20T17:47:01.5860771Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/merge\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"17\",\"eTag\":\"0x8D7259667FDF34C\",\"lastModified\":\"2019-08-20T17:47:01.623278Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/17\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"15\",\"eTag\":\"0x8D7259668000E6D\",\"lastModified\":\"2019-08-20T17:47:01.6370797Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/15\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"8\",\"eTag\":\"0x8D725966802F483\",\"lastModified\":\"2019-08-20T17:47:01.6560771Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/8\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"16\",\"eTag\":\"0x8D725966808E7EA\",\"lastModified\":\"2019-08-20T17:47:01.6950762Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/16\"\r\n },{\r\n 
\"status\":\"Success\",\"taskId\":\"13\",\"eTag\":\"0x8D72596680A207B\",\"lastModified\":\"2019-08-20T17:47:01.7030779Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/13\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"10\",\"eTag\":\"0x8D72596680BCE0E\",\"lastModified\":\"2019-08-20T17:47:01.714075Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/10\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"11\",\"eTag\":\"0x8D72596680BA702\",\"lastModified\":\"2019-08-20T17:47:01.7130754Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/11\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"9\",\"eTag\":\"0x8D72596681061ED\",\"lastModified\":\"2019-08-20T17:47:01.7440749Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/9\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"6\",\"eTag\":\"0x8D72596680BCE0E\",\"lastModified\":\"2019-08-20T17:47:01.714075Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/6\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"5\",\"eTag\":\"0x8D72596680BA702\",\"lastModified\":\"2019-08-20T17:47:01.7130754Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/5\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"7\",\"eTag\":\"0x8D72596680BCE0E\",\"lastModified\":\"2019-08-20T17:47:01.714075Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/7\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"14\",\"eTag\":\"0x8D72596680A4782\",\"lastModified\":\"2019-08-20T17:47:01.704077Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/14\"\r\n },{\r\n 
\"status\":\"Success\",\"taskId\":\"4\",\"eTag\":\"0x8D72596680BF53A\",\"lastModified\":\"2019-08-20T17:47:01.7150778Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/4\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"12\",\"eTag\":\"0x8D72596680A6E93\",\"lastModified\":\"2019-08-20T17:47:01.7050771Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/12\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"0\",\"eTag\":\"0x8D72596680BCE0E\",\"lastModified\":\"2019-08-20T17:47:01.714075Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/0\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"1\",\"eTag\":\"0x8D72596680BF53A\",\"lastModified\":\"2019-08-20T17:47:01.7150778Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/1\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"2\",\"eTag\":\"0x8D72596680D54AF\",\"lastModified\":\"2019-08-20T17:47:01.7240751Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/2\"\r\n },{\r\n \"status\":\"Success\",\"taskId\":\"3\",\"eTag\":\"0x8D72596680D2D99\",\"lastModified\":\"2019-08-20T17:47:01.7230745Z\",\"location\":\"https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3/tasks/3\"\r\n }\r\n ]\r\n}" - } - } - }, - { - "request": { - "method": "PATCH", - "uri": "https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3?api-version=2019-08-01.10.0", - "body": "{\"onAllTasksComplete\": \"terminatejob\"}", - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-batch/8.0.0 Azure-SDK-For-Python batchextensionsclient/7.0.0" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL2JhdGNoLmNvcmUud2luZG93cy5uZXQvIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3LyIsImlhdCI6MTU2NjMyMDcyNCwibmJmIjoxNTY2MzIwNzI0LCJleHAiOjE1NjYzMjQ2MjQsIl9jbGFpbV9uYW1lcyI6eyJncm91cHMiOiJzcmMxIn0sIl9jbGFpbV9zb3VyY2VzIjp7InNyYzEiOnsiZW5kcG9pbnQiOiJodHRwczovL2dyYXBoLndpbmRvd3MubmV0LzcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0Ny91c2Vycy8yNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEvZ2V0TWVtYmVyT2JqZWN0cyJ9fSwiYWNyIjoiMSIsImFpbyI6IkFVUUF1LzhNQUFBQUJmSVhrS1pUNXN2dGVyVzhpeVgyQ1JCODlJc2dTVFJtZFdPeHR0aFNMVXZzZEtwd0YxTmloNjFtcEdMYjRnNmxES01Md0lMTmtBSkhCblBCSithdU5BPT0iLCJhbXIiOlsicnNhIiwibWZhIl0sImFwcGlkIjoiMDRiMDc3OTUtOGRkYi00NjFhLWJiZWUtMDJmOWUxYmY3YjQ2IiwiYXBwaWRhY3IiOiIwIiwiZGV2aWNlaWQiOiIxZDUwYzVkZi1mZDAxLTRhNjQtODg1OS04NDcyMzc1OGEyNDQiLCJmYW1pbHlfbmFtZSI6IktsZWluIiwiZ2l2ZW5fbmFtZSI6IkJyYW5kb24iLCJpcGFkZHIiOiIxMzEuMTA3LjE1OS4yMiIsIm5hbWUiOiJCcmFuZG9uIEtsZWluIiwib2lkIjoiMjcyNDQ5MzUtYTRiOS00MGE0LWEyNzItNDI5NDJiNjdlY2YxIiwib25wcmVtX3NpZCI6IlMtMS01LTIxLTIxMjc1MjExODQtMTYwNDAxMjkyMC0xODg3OTI3NTI3LTMwODY5MTc0IiwicHVpZCI6IjEwMDMwMDAwQTkxNzc4OUUiLCJzY3AiOiJ1c2VyX2ltcGVyc29uYXRpb24iLCJzdWIiOiJaTnRJSW14ajVlSk9TbnJRTXh1UTFGeGVPOHhiYnVhQmFrU0FYYjRqRE84IiwidGlkIjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3IiwidW5pcXVlX25hbWUiOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1cG4iOiJicmtsZWluQG1pY3Jvc29mdC5jb20iLCJ1dGkiOiIzcU02WE1IMS1VeTc1OGREc3VFUUFBIiwidmVyIjoiMS4wIn0.6wF-URC5pN8R6lYNu887Vqul47X3Kpm5g_d0S6tYjtW42KcCv95dvXiWr3_xQ62vDBHLekWJUFTDt-JIa-7Jothw-k4LGIe4OyT3c5VeLMupH5fepX8puj3cfxUAubdUIwq3nw8XHksD979tOyFh_lOCyHPNs69UgpQUufHkX-262eCQjlQoXTigdmxd4uhW7ybcLKxTyIh16K8JI3tHU6lQQDeKGDVqgkXTWqAHWhlHiaZ8SYsfjV07lLS-YnBmjyM16WHnDCaUwDy326rKfbdsAS2r6br2NERDpX_yoq01rFxP1mzQrnokb7sAJBQbV5dqalO3kU0JwvcGwhO3hQ" - ], - "Content-Type": [ - "application/json; odata=minimalmetadata; charset=utf-8" - 
], - "client-request-id": [ - "83e2eb8c-c372-11e9-ba0c-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "38" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "DataServiceId": [ - "https://sdktest2.westcentralus.batch.azure.com/jobs/helloworld-job3" - ], - "Date": [ - "Tue, 20 Aug 2019 17:47:01 GMT" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "ETag": [ - "0x8D72596681F6F76" - ], - "request-id": [ - "b41e04a0-98b0-441e-9c92-ae47a212604c" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:47:01 GMT" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "Transfer-Encoding": [ - "chunked" - ], - "X-Content-Type-Options": [ - "nosniff" - ], - "DataServiceVersion": [ - "3.0" - ] - }, - "body": { - "string": "" - } - } - } - ] -} \ No newline at end of file diff --git a/tests/recordings/test_batch_upload_live.yaml b/tests/recordings/test_batch_upload_live.yaml deleted file mode 100644 index d1591668..00000000 --- a/tests/recordings/test_batch_upload_live.yaml +++ /dev/null @@ -1,709 +0,0 @@ -{ - "version": 1, - "interactions": [ - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "620e0246-c36d-11e9-8d5d-44032c851686" - 
], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "ETag": [ - "\"0x8D719F74B1A7775\"" - ], - "Vary": [ - "Accept-Encoding" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Cache-Control": [ - "no-cache" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:18 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Expires": [ - "-1" - ], - "Last-Modified": [ - "Mon, 05 Aug 2019 22:50:20 GMT" - ], - "content-length": [ - "2074" - ] - }, - "body": { - "string": "{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0
},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - 
"x-ms-client-request-id": [ - "635f8cc8-c36d-11e9-8f2e-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Vary": [ - "Accept-Encoding" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1198" - ], - "Content-Type": [ - "application/json" - ], - "Cache-Control": [ - "no-cache" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:19 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Expires": [ - "-1" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "639a9b26-c36d-11e9-bd96-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:v7Qz3XrJDiV1hh2gPzfDRZpEwaXRSQlIVDDYAJfIxMQ=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "ETag": [ - "\"0x8D7259147EDD103\"" - ], - "Content-Length": [ - "0" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - 
"Last-Modified": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "63bda924-c36d-11e9-9f01-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:5vEpgDf9i7CfmjL971k17Nc+Pk88wELnAFKqF2GpHEY=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." - }, - "headers": { - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "Content-Type": [ - "application/xml" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:4bdbf2ca-201e-0095-757a-570533000000\nTime:2019-08-20T17:10:20.3918348Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/foo.txt", - "body": "1", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.2560813" - ], - "Content-MD5": [ - "xMpCOKC5I4INzFCab3WEmw==" - ], - "Content-Length": [ - "1" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "63c45fe2-c36d-11e9-a84e-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Authorization": [ - "SharedKey 
sdkteststore2:luZ7DbzrTNLthPaTz0OPxcuZVcuucyVjpOYOWE5WUyI=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-request-server-encrypted": [ - "true" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "ETag": [ - "\"0x8D7259147FB9423\"" - ], - "Content-Length": [ - "0" - ], - "Content-MD5": [ - "xMpCOKC5I4INzFCab3WEmw==" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ] - }, - "body": { - "string": "" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2?api-version=2019-08-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-batch/7.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "x-ms-client-request-id": [ - "63f757ec-c36d-11e9-abd6-44032c851686" - 
], - "accept-language": [ - "en-US" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-HTTPAPI/2.0" - ], - "ETag": [ - "\"0x8D719F74B1A7775\"" - ], - "Vary": [ - "Accept-Encoding" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - "Cache-Control": [ - "no-cache" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:20 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Expires": [ - "-1" - ], - "Last-Modified": [ - "Mon, 05 Aug 2019 22:50:20 GMT" - ], - "content-length": [ - "2074" - ] - }, - "body": { - "string": "{\"id\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Batch/batchAccounts/sdktest2\",\"name\":\"sdktest2\",\"type\":\"Microsoft.Batch/batchAccounts\",\"location\":\"westcentralus\",\"properties\":{\"accountEndpoint\":\"sdktest2.westcentralus.batch.azure.com\",\"provisioningState\":\"Succeeded\",\"dedicatedCoreQuota\":20,\"dedicatedCoreQuotaPerVMFamily\":[{\"name\":\"standardA0_A7Family\",\"coreQuota\":20},{\"name\":\"standardDv2Family\",\"coreQuota\":20},{\"name\":\"standardA8_A11Family\",\"coreQuota\":0},{\"name\":\"standardDFamily\",\"coreQuota\":0},{\"name\":\"standardGFamily\",\"coreQuota\":0},{\"name\":\"basicAFamily\",\"coreQuota\":0},{\"name\":\"standardFFamily\",\"coreQuota\":0},{\"name\":\"standardNVFamily\",\"coreQuota\":0},{\"name\":\"standardNVPromoFamily\",\"coreQuota\":0},{\"name\":\"standardNCFamily\",\"coreQuota\":0},{\"name\":\"standardNCPromoFamily\",\"coreQuota\":0},{\"name\":\"standardHFamily\",\"coreQuota\":0},{\"name\":\"standardHPromoFamily\",\"coreQuota\":0},{\"name\":\"standardAv2Family\",\"coreQuota\":0},{\"name\":\"standardMSFamily\",\"coreQuota\":0},{\"name\":\"standardDv3Family\",\"coreQuota\":0},{\"name\":\"standardEv3Family\",\"coreQuota\":0
},{\"name\":\"standardDSFamily\",\"coreQuota\":0},{\"name\":\"standardDSv2Family\",\"coreQuota\":0},{\"name\":\"standardDSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSFamily\",\"coreQuota\":0},{\"name\":\"standardESv3Family\",\"coreQuota\":0},{\"name\":\"standardGSFamily\",\"coreQuota\":0},{\"name\":\"standardLSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv2Family\",\"coreQuota\":0},{\"name\":\"standardNDSFamily\",\"coreQuota\":0},{\"name\":\"standardNCSv3Family\",\"coreQuota\":0},{\"name\":\"standardFSv2Family\",\"coreQuota\":0},{\"name\":\"standardHBSFamily\",\"coreQuota\":0},{\"name\":\"standardHCSFamily\",\"coreQuota\":0}],\"dedicatedCoreQuotaPerVMFamilyEnforced\":false,\"lowPriorityCoreQuota\":100,\"poolQuota\":100,\"activeJobAndJobScheduleQuota\":300,\"autoStorage\":{\"storageAccountId\":\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2\",\"lastKeySync\":\"2019-07-16T21:55:40.4909987Z\"},\"poolAllocationMode\":\"BatchService\"},\"tags\":{\"rawr\":\"test\"}}" - } - } - }, - { - "request": { - "method": "POST", - "uri": "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sdktest/providers/Microsoft.Storage/storageAccounts/sdkteststore2/listKeys?api-version=2018-02-01", - "body": null, - "headers": { - "User-Agent": [ - "python/3.6.5 (Windows-10-10.0.18362-SP0) msrest/0.6.9 msrest_azure/0.6.1 azure-mgmt-storage/2.0.0 Azure-SDK-For-Python" - ], - "Accept-Encoding": [ - "gzip, deflate" - ], - "Accept": [ - "application/json" - ], - "Connection": [ - "keep-alive" - ], - "Authorization": [ - "Bearer 
eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCIsImtpZCI6ImllX3FXQ1hoWHh0MXpJRXN1NGM3YWNRVkduNCJ9.eyJhdWQiOiJodHRwczovL21hbmFnZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC83MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDcvIiwiaWF0IjoxNTY2MzIwNzE4LCJuYmYiOjE1NjYzMjA3MTgsImV4cCI6MTU2NjMyNDYxOCwiX2NsYWltX25hbWVzIjp7Imdyb3VwcyI6InNyYzEifSwiX2NsYWltX3NvdXJjZXMiOnsic3JjMSI6eyJlbmRwb2ludCI6Imh0dHBzOi8vZ3JhcGgud2luZG93cy5uZXQvNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjQ3L3VzZXJzLzI3MjQ0OTM1LWE0YjktNDBhNC1hMjcyLTQyOTQyYjY3ZWNmMS9nZXRNZW1iZXJPYmplY3RzIn19LCJhY3IiOiIxIiwiYWlvIjoiQVVRQXUvOE1BQUFBT0lvTEVUclJGWnErQmFoaVNrVmhNNXR5QzYwSDZBSDNlZG5vMGJQbTFRYUtvV3Rva01QaDdiZjIvM0VFZ0NHbmo0UFFWY3FHaXdVbkFQYjRONmZwZ1E9PSIsImFtciI6WyJyc2EiLCJtZmEiXSwiYXBwaWQiOiIwNGIwNzc5NS04ZGRiLTQ2MWEtYmJlZS0wMmY5ZTFiZjdiNDYiLCJhcHBpZGFjciI6IjAiLCJkZXZpY2VpZCI6IjFkNTBjNWRmLWZkMDEtNGE2NC04ODU5LTg0NzIzNzU4YTI0NCIsImZhbWlseV9uYW1lIjoiS2xlaW4iLCJnaXZlbl9uYW1lIjoiQnJhbmRvbiIsImlwYWRkciI6IjEzMS4xMDcuMTU5LjIyIiwibmFtZSI6IkJyYW5kb24gS2xlaW4iLCJvaWQiOiIyNzI0NDkzNS1hNGI5LTQwYTQtYTI3Mi00Mjk0MmI2N2VjZjEiLCJvbnByZW1fc2lkIjoiUy0xLTUtMjEtMjEyNzUyMTE4NC0xNjA0MDEyOTIwLTE4ODc5Mjc1MjctMzA4NjkxNzQiLCJwdWlkIjoiMTAwMzAwMDBBOTE3Nzg5RSIsInNjcCI6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6IjMtaVZMWlVxZzhyWVVFNHlLRXZPSktES0N2Z1I0SVJvQXJhVzlRWmJNRkEiLCJ0aWQiOiI3MmY5ODhiZi04NmYxLTQxYWYtOTFhYi0yZDdjZDAxMWRiNDciLCJ1bmlxdWVfbmFtZSI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInVwbiI6ImJya2xlaW5AbWljcm9zb2Z0LmNvbSIsInV0aSI6IlZkQ05pT2w3Z0UyWkw3QTVBMFFPQUEiLCJ2ZXIiOiIxLjAifQ.XjlVAUievRf_e8bKWsAY7Ca1e2RR2FIB4PpXBKa6Vzy5xfZ_c33OFQWkB610FXt-E86jl61B0siTx1aVQQbXt9iAdqcfb27MKeDX_sXi_BjTUIA6xgfRm1CnG8vFq_GpLPy0GIgzuQkaPqPifXIz39SzMavmrLaAp5Ct1j09e9yXwcIxLhSRg_WibgqY22tbcremd_-y9qZex3xEzc798Nz62_AADDKgBjivlwxGX5TpOiEZxhNhD6pS4nlTJ4eiyS7mFRC1nIGB1SMZrgnWjQ5dRcib_7krgdW_4J-kqA-Tg4FGo8aPFBxjMADxfCOF04W2KykUZpLfF_9c2HZGoQ" - ], - "Content-Type": [ - "application/json; charset=utf-8" - ], - 
"x-ms-client-request-id": [ - "64348c5a-c36d-11e9-bba3-44032c851686" - ], - "accept-language": [ - "en-US" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 200, - "message": "OK" - }, - "headers": { - "X-Content-Type-Options": [ - "nosniff" - ], - "Server": [ - "Microsoft-Azure-Storage-Resource-Provider/1.0,Microsoft-HTTPAPI/2.0 Microsoft-HTTPAPI/2.0" - ], - "Vary": [ - "Accept-Encoding" - ], - "Strict-Transport-Security": [ - "max-age=31536000; includeSubDomains" - ], - "x-ms-ratelimit-remaining-subscription-writes": [ - "1199" - ], - "Content-Type": [ - "application/json" - ], - "Cache-Control": [ - "no-cache" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Pragma": [ - "no-cache" - ], - "Transfer-Encoding": [ - "chunked" - ], - "Expires": [ - "-1" - ], - "content-length": [ - "288" - ] - }, - "body": { - "string": "{\"keys\":[{\"keyName\":\"key1\",\"value\":\"abc==\",\"permissions\":\"FULL\"},{\"keyName\":\"key2\",\"value\":\"def==\",\"permissions\":\"FULL\"}]}" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests?restype=container", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "646d2eb6-c36d-11e9-94af-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:1isvSPc/8yB/u2FpErFqYsn4rl5T9zNv2dKWniT5ZqY=" - ], - "Content-Length": [ - "0" - ] - } - }, - "response": { - "status": { - "code": 409, - "message": "The specified container already exists." 
- }, - "headers": { - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-error-code": [ - "ContainerAlreadyExists" - ], - "Content-Type": [ - "application/xml" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Content-Length": [ - "230" - ] - }, - "body": { - "string": "\ufeffContainerAlreadyExistsThe specified container already exists.\nRequestId:2ba5f07f-801e-00d7-587a-572e27000000\nTime:2019-08-20T17:10:21.6712573Z" - } - } - }, - { - "request": { - "method": "GET", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt?comp=metadata", - "body": null, - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "64877314-c36d-11e9-b7df-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:aAHSeQqJMQfy81sMSEeHBMNxvc4B5Sq9dWqWou4q/7M=" - ] - } - }, - "response": { - "status": { - "code": 404, - "message": "The specified blob does not exist." 
- }, - "headers": { - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-error-code": [ - "BlobNotFound" - ], - "Content-Type": [ - "application/xml" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Content-Length": [ - "215" - ] - }, - "body": { - "string": "\ufeffBlobNotFoundThe specified blob does not exist.\nRequestId:2ba5f090-801e-00d7-677a-572e27000000\nTime:2019-08-20T17:10:21.7162882Z" - } - } - }, - { - "request": { - "method": "PUT", - "uri": "https://sdkteststore2.blob.core.windows.net/fgrp-cli-batch-extensions-live-tests/test/data/foo.txt", - "body": "1", - "headers": { - "User-Agent": [ - "Azure-Storage/1.1.0-1.1.0 (Python CPython 3.6.5; Windows 10)" - ], - "Connection": [ - "keep-alive" - ], - "x-ms-blob-type": [ - "BlockBlob" - ], - "x-ms-meta-lastmodified": [ - "1524153754.2560813" - ], - "Content-MD5": [ - "xMpCOKC5I4INzFCab3WEmw==" - ], - "Content-Length": [ - "1" - ], - "x-ms-version": [ - "2017-07-29" - ], - "x-ms-client-request-id": [ - "648e5078-c36d-11e9-bd72-44032c851686" - ], - "x-ms-date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Authorization": [ - "SharedKey sdkteststore2:CA4y9UdmvqQCSTVMsl3UlY5NEZN/hlYLMW8pBl8xt04=" - ] - } - }, - "response": { - "status": { - "code": 201, - "message": "Created" - }, - "headers": { - "x-ms-request-server-encrypted": [ - "true" - ], - "Server": [ - "Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0" - ], - "ETag": [ - "\"0x8D7259148C5E662\"" - ], - "Content-Length": [ - "0" - ], - "Content-MD5": [ - "xMpCOKC5I4INzFCab3WEmw==" - ], - "x-ms-version": [ - "2017-07-29" - ], - "Date": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ], - "Last-Modified": [ - "Tue, 20 Aug 2019 17:10:21 GMT" - ] - }, - "body": { - "string": "" - } - } - } - ] -} \ No newline at end of file diff --git a/tests/test_extensions.py b/tests/test_extensions.py index 3124ba88..821b9729 100644 --- a/tests/test_extensions.py +++ b/tests/test_extensions.py @@ -625,12 +625,13 @@ 
def test_batch_extensions_parse_invalid_parametricsweep(self): utils._expand_parametric_sweep(template) # pylint: disable=protected-access def test_batch_extensions_preserve_resourcefiles(self): - fileutils = file_utils.FileUtils(None) + fileutils = file_utils.FileUtils(MagicMock(spec=BlockBlobService)) request = Mock( resource_files=[ Mock( http_url='abc', - file_path='xyz') + file_path='xyz', + source=None) ]) transformed = utils.post_processing(request, fileutils, pool_utils.PoolOperatingSystemFlavor.LINUX) self.assertEqual(transformed, request) @@ -638,27 +639,26 @@ def test_batch_extensions_preserve_resourcefiles(self): common_resource_files=[ Mock( http_url='abc', - file_path='xyz') + file_path='xyz', + source=None) ], job_manager_task=Mock( resource_files=[ Mock( http_url='foo', - file_path='bar') + file_path='bar', + source=None) ] ) ) transformed = utils.post_processing(request, fileutils, pool_utils.PoolOperatingSystemFlavor.WINDOWS) self.assertEqual(transformed, request) request = [ # pylint: disable=redefined-variable-type - Mock(resource_files=[Mock(http_url='abc', file_path='xyz')]), - Mock(resource_files=[Mock(http_url='abc', file_path='xyz')]) + Mock(resource_files=[Mock(http_url='abc', file_path='xyz', source=None)]), + Mock(resource_files=[Mock(http_url='abc', file_path='xyz', source=None)]) ] transformed = utils.post_processing(request, fileutils, pool_utils.PoolOperatingSystemFlavor.WINDOWS) self.assertEqual(transformed, request) - request = Mock(resource_files=[Mock(http_url='abc', file_path=None)]) - with self.assertRaises(ValueError): - utils.post_processing(request, fileutils, pool_utils.PoolOperatingSystemFlavor.WINDOWS) def test_batch_extensions_validate_parameter(self): content = { @@ -1654,9 +1654,3 @@ def test_batch_template_reject(self): parameter_obj = json.load(parameter) pool_ops = operations.ExtendedPoolOperations(None, None, None, self._serialize, self._deserialize, None) pool_template_json = pool_ops.expand_template(template_obj, 
parameter_obj) - - with self.assertRaises(NotImplementedError): - job_ops.jobparameter_from_json(job_template_json) - - with self.assertRaises(NotImplementedError): - pool_ops.poolparameter_from_json(pool_template_json) diff --git a/tests/test_live.py b/tests/test_live.py index d3106fe5..e18acc9d 100644 --- a/tests/test_live.py +++ b/tests/test_live.py @@ -144,18 +144,21 @@ def wait_for_pool_steady(self, pool_id, timeout): print('waiting for pool to reach steady state') while True: - pool = self.batch_client.pool.get(pool_id) - if pool.allocation_state == AllocationState.steady: - print('pool reached steady state') - return + try: + pool = self.batch_client.pool.get(pool_id) + except BatchErrorException: + pass else: - wait_for = 3 - timeout = timeout - wait_for - if timeout < 0: - raise RuntimeError('Timed out') - else: - import time - time.sleep(wait_for) + if pool.allocation_state == AllocationState.steady: + print('pool reached steady state') + return + wait_for = 3 + timeout = timeout - wait_for + if timeout < 0: + raise RuntimeError('Timed out') + else: + import time + time.sleep(wait_for) def wait_for_vms_idle(self, pool_id, timeout): print('waiting for vms to be idle') @@ -343,7 +346,7 @@ def sku_filter_function(skus): try: add_pool = self.batch_client._deserialize('PoolAddParameter', pool) # pylint:disable=protected-access - self.batch_client.pool.add(add_pool) + self.batch_client.pool_extensions.add(add_pool) print('Successfully created pool {}'.format(pool_id)) except BatchErrorException as ex: if ex.error.code == 'PoolExists': @@ -428,11 +431,11 @@ def body(self): # Batch Explorer workflow with open(os.path.join(self.data_dir, 'batch.pool.simple.resourcefile-legacy.json'), 'r') as template: json_obj = json.load(template) - expanded_template = self.batch_client.pool.expand_template(json_obj) - pool_param = self.batch_client.pool.poolparameter_from_json(expanded_template) - self.batch_client.pool.add(pool_param) - self.wait_for_pool_steady(pool_param.id, 
5 * 60) - self.batch_client.pool.delete(pool_param.id) + expanded_template = self.batch_client.pool_extensions.expand_template(json_obj) + pool_param = self.batch_client.pool_extensions.poolparameter_from_json(expanded_template) + pool = self.batch_client.pool_extensions.add(pool_param) + self.wait_for_pool_steady(pool_param.properties.id, 5 * 60) + self.batch_client.pool.delete(pool_param.properties.id) # Batch simple legacy task factory self.cmd("batch job create --template '{}'".format(os.path.join(