From f06eafb75dd7d557fd05dd71488d388d87c8b195 Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Wed, 19 Mar 2025 17:54:17 +0000 Subject: [PATCH] CodeGen from PR 32796 in Azure/azure-rest-api-specs Merge cad21c2d4d0f279e621e4801fcaf23f4e7e6c2dc into e29cd481a6a8f63a5a7196ff081e5c775572ad04 --- .../src/Generated/AutoUserSpecification.cs | 4 +- .../src/Generated/AutomaticOsUpgradePolicy.cs | 4 +- .../BatchCertificate.Serialization.cs | 294 ++ .../src/Generated/BatchCertificate.cs | 128 + .../src/Generated/BatchCertificateFormat.cs | 51 + ...BatchCertificateReference.Serialization.cs | 207 + .../Generated/BatchCertificateReference.cs | 95 + .../src/Generated/BatchCertificateState.cs | 54 + .../BatchCertificateStoreLocation.cs | 51 + .../Generated/BatchCertificateVisibility.cs | 54 + .../src/Generated/BatchClient.cs | 3685 +++++++++++------ .../src/Generated/BatchClientOptions.cs | 8 +- .../src/Generated/BatchJob.cs | 4 +- .../src/Generated/BatchJobManagerTask.cs | 6 +- ...chJobNetworkConfiguration.Serialization.cs | 10 +- .../Generated/BatchJobNetworkConfiguration.cs | 16 +- .../src/Generated/BatchJobPreparationTask.cs | 6 +- .../src/Generated/BatchJobReleaseTask.cs | 6 +- ...atchJobScheduleStatistics.Serialization.cs | 20 +- .../BatchJobStatistics.Serialization.cs | 20 +- .../BatchJobUpdateContent.Serialization.cs | 16 + .../src/Generated/BatchJobUpdateContent.cs | 6 +- .../src/Generated/BatchNode.Serialization.cs | 26 + .../src/Generated/BatchNode.cs | 21 +- .../BatchNodeCounts.Serialization.cs | 18 + .../src/Generated/BatchNodeCounts.cs | 16 +- ...atchNodeDeallocateContent.Serialization.cs | 149 + .../Generated/BatchNodeDeallocateContent.cs | 65 + .../Generated/BatchNodeDeallocateOption.cs | 57 + .../BatchNodeReimageContent.Serialization.cs | 149 + .../src/Generated/BatchNodeReimageContent.cs | 65 + .../src/Generated/BatchNodeReimageOption.cs | 57 + .../src/Generated/BatchNodeState.cs | 6 + .../Generated/BatchNodeUserCreateContent.cs | 4 +- .../Generated/BatchNodeUserUpdateContent.cs | 4 +- .../src/Generated/BatchPool.Serialization.cs | 26 + .../src/Generated/BatchPool.cs | 25 +- .../BatchPoolCreateContent.Serialization.cs | 26 + .../src/Generated/BatchPoolCreateContent.cs | 27 +- .../BatchPoolEnableAutoScaleContent.cs | 4 +- .../BatchPoolEvaluateAutoScaleContent.cs | 6 +- .../BatchPoolReplaceContent.Serialization.cs | 7 + .../src/Generated/BatchPoolReplaceContent.cs | 34 +- ...tchPoolResourceStatistics.Serialization.cs | 8 +- .../BatchPoolSpecification.Serialization.cs | 26 + .../src/Generated/BatchPoolSpecification.cs | 25 +- .../BatchPoolUpdateContent.Serialization.cs | 232 +- .../src/Generated/BatchPoolUpdateContent.cs | 68 +- .../src/Generated/BatchPoolUsageMetrics.cs | 6 +- .../src/Generated/BatchStartTask.cs | 6 +- .../src/Generated/BatchTask.cs | 4 +- ...atchTaskContainerSettings.Serialization.cs | 33 +- .../Generated/BatchTaskContainerSettings.cs | 7 +- .../src/Generated/BatchTaskCreateContent.cs | 6 +- .../BatchTaskStatistics.Serialization.cs | 8 +- .../src/Generated/ComputeBatchModelFactory.cs | 134 +- ...erHostBatchBindMountEntry.Serialization.cs | 164 + .../ContainerHostBatchBindMountEntry.cs | 69 + .../src/Generated/ContainerHostDataPath.cs | 63 + ...leteBatchCertificateError.Serialization.cs | 181 + .../Generated/DeleteBatchCertificateError.cs | 74 + .../src/Generated/DiffDiskPlacement.cs | 2 +- .../src/Generated/DiffDiskSettings.cs | 4 +- .../Generated/DiskEncryptionConfiguration.cs | 4 +- .../src/Generated/Docs/BatchClient.xml | 924 ++++- 
.../Generated/FileProperties.Serialization.cs | 4 +- .../GetCertificateResponse.Serialization.cs | 257 ++ .../src/Generated/GetCertificateResponse.cs | 110 + .../Generated/ImageReference.Serialization.cs | 24 + .../src/Generated/ImageReference.cs | 14 +- .../src/Generated/InboundNatPool.cs | 6 +- .../Generated/ManagedDisk.Serialization.cs | 30 +- .../src/Generated/ManagedDisk.cs | 17 +- .../src/Generated/NetworkConfiguration.cs | 12 +- .../OutputFileBlobContainerDestination.cs | 4 +- .../src/Generated/SecurityEncryptionTypes.cs | 51 + .../src/Generated/SecurityProfile.cs | 6 +- .../src/Generated/SecurityTypes.cs | 3 + .../src/Generated/UpgradePolicy.cs | 4 +- .../VMDiskSecurityProfile.Serialization.cs | 149 + .../src/Generated/VMDiskSecurityProfile.cs | 65 + .../Generated/VirtualMachineConfiguration.cs | 4 +- .../src/Generated/WindowsUserConfiguration.cs | 4 +- .../Generated/Samples/Samples_BatchClient.cs | 1502 +++++-- .../Azure.Compute.Batch/tsp-location.yaml | 3 +- 85 files changed, 8069 insertions(+), 1785 deletions(-) create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostDataPath.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs create mode 100644 sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs index 655ea827b2ca..ecd162d250ab 100644 --- 
a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs @@ -51,7 +51,7 @@ public AutoUserSpecification() } /// Initializes a new instance of . - /// The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. /// The elevation level of the auto user. The default value is nonAdmin. /// Keeps track of any properties unknown to the library. internal AutoUserSpecification(AutoUserScope? scope, ElevationLevel? elevationLevel, IDictionary serializedAdditionalRawData) @@ -61,7 +61,7 @@ internal AutoUserSpecification(AutoUserScope? scope, ElevationLevel? elevationLe _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + /// The scope for the auto user. The default value is pool. If the pool is running Windows a value of Task should be specified if stricter isolation between tasks is required. For example, if the task mutates the registry in a way which could impact other tasks, or if certificates have been specified on the pool which should not be accessible by normal tasks but should be accessible by StartTasks. public AutoUserScope? Scope { get; set; } /// The elevation level of the auto user. The default value is nonAdmin. public ElevationLevel? ElevationLevel { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs index a55f474ba013..c3b9be08a2f0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs @@ -52,7 +52,7 @@ public AutomaticOsUpgradePolicy() /// Initializes a new instance of . /// Whether OS image rollback feature should be disabled. - /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. + /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. /// Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. 
Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. /// Defer OS upgrades on the TVMs if they are running tasks. /// Keeps track of any properties unknown to the library. @@ -67,7 +67,7 @@ internal AutomaticOsUpgradePolicy(bool? disableAutomaticRollback, bool? enableAu /// Whether OS image rollback feature should be disabled. public bool? DisableAutomaticRollback { get; set; } - /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. + /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. public bool? EnableAutomaticOsUpgrade { get; set; } /// Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. public bool? UseRollingUpgradePolicy { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs new file mode 100644 index 000000000000..123ea0c12d5d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs @@ -0,0 +1,294 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchCertificate : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchCertificate)} does not support writing '{format}' format."); + } + + writer.WritePropertyName("thumbprint"u8); + writer.WriteStringValue(Thumbprint); + writer.WritePropertyName("thumbprintAlgorithm"u8); + writer.WriteStringValue(ThumbprintAlgorithm); + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PublicData)) + { + writer.WritePropertyName("publicData"u8); + writer.WriteStringValue(PublicData); + } + if (options.Format != "W" && Optional.IsDefined(DeleteCertificateError)) + { + writer.WritePropertyName("deleteCertificateError"u8); + writer.WriteObjectValue(DeleteCertificateError, options); + } + writer.WritePropertyName("data"u8); + writer.WriteStringValue(Data); + if (Optional.IsDefined(CertificateFormat)) + { + writer.WritePropertyName("certificateFormat"u8); + writer.WriteStringValue(CertificateFormat.Value.ToString()); + } + if (Optional.IsDefined(Password)) + { + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + BatchCertificate IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchCertificate)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchCertificate(document.RootElement, options); + } + + internal static BatchCertificate DeserializeBatchCertificate(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string thumbprint = default; + string thumbprintAlgorithm = default; + string url = default; + BatchCertificateState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchCertificateState? previousState = default; + DateTimeOffset? 
previousStateTransitionTime = default; + string publicData = default; + DeleteBatchCertificateError deleteCertificateError = default; + string data = default; + BatchCertificateFormat? certificateFormat = default; + string password = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("thumbprint"u8)) + { + thumbprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("thumbprintAlgorithm"u8)) + { + thumbprintAlgorithm = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchCertificateState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchCertificateState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("publicData"u8)) + { + publicData = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleteCertificateError"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + deleteCertificateError = DeleteBatchCertificateError.DeserializeDeleteBatchCertificateError(property.Value, options); + continue; + } + if (property.NameEquals("data"u8)) + { + data = property.Value.GetString(); + continue; + } + if (property.NameEquals("certificateFormat"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + certificateFormat = new BatchCertificateFormat(property.Value.GetString()); + continue; + } + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchCertificate( + thumbprint, + thumbprintAlgorithm, + url, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + publicData, + deleteCertificateError, + data, + certificateFormat, + password, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchCertificate)} does not support writing '{options.Format}' format."); + } + } + + BatchCertificate IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchCertificate(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchCertificate)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchCertificate FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchCertificate(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs new file mode 100644 index 000000000000..6e858116505e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs @@ -0,0 +1,128 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A Certificate that can be installed on Compute Nodes and can be used to + /// authenticate operations on the machine. + /// + public partial class BatchCertificate + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// The base64-encoded contents of the Certificate. The maximum size is 10KB. + /// , or is null. + public BatchCertificate(string thumbprint, string thumbprintAlgorithm, string data) + { + Argument.AssertNotNull(thumbprint, nameof(thumbprint)); + Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNull(data, nameof(data)); + + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + Data = data; + } + + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// The URL of the Certificate. 
+ /// The state of the Certificate. + /// The time at which the Certificate entered its current state. + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + /// The public part of the Certificate as a base-64 encoded .cer file. + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. + /// The base64-encoded contents of the Certificate. The maximum size is 10KB. + /// The format of the Certificate data. + /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. + /// Keeps track of any properties unknown to the library. + internal BatchCertificate(string thumbprint, string thumbprintAlgorithm, string url, BatchCertificateState? state, DateTimeOffset? stateTransitionTime, BatchCertificateState? previousState, DateTimeOffset? previousStateTransitionTime, string publicData, DeleteBatchCertificateError deleteCertificateError, string data, BatchCertificateFormat? certificateFormat, string password, IDictionary serializedAdditionalRawData) + { + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + Url = url; + State = state; + StateTransitionTime = stateTransitionTime; + PreviousState = previousState; + PreviousStateTransitionTime = previousStateTransitionTime; + PublicData = publicData; + DeleteCertificateError = deleteCertificateError; + Data = data; + CertificateFormat = certificateFormat; + Password = password; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchCertificate() + { + } + + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + public string Thumbprint { get; set; } + /// The algorithm used to derive the thumbprint. This must be sha1. + public string ThumbprintAlgorithm { get; set; } + /// The URL of the Certificate. + public string Url { get; } + /// The state of the Certificate. + public BatchCertificateState? State { get; } + /// The time at which the Certificate entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + public BatchCertificateState? PreviousState { get; } + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + public DateTimeOffset? PreviousStateTransitionTime { get; } + /// The public part of the Certificate as a base-64 encoded .cer file. + public string PublicData { get; } + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. + public DeleteBatchCertificateError DeleteCertificateError { get; } + /// The base64-encoded contents of the Certificate. The maximum size is 10KB. + public string Data { get; set; } + /// The format of the Certificate data. + public BatchCertificateFormat? CertificateFormat { get; set; } + /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. 
+ public string Password { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs new file mode 100644 index 000000000000..121b2cffc624 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateFormat.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchCertificateFormat enums. + public readonly partial struct BatchCertificateFormat : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchCertificateFormat(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string PfxValue = "pfx"; + private const string CerValue = "cer"; + + /// The Certificate is a PFX (PKCS#12) formatted Certificate or Certificate chain. + public static BatchCertificateFormat Pfx { get; } = new BatchCertificateFormat(PfxValue); + /// The Certificate is a base64-encoded X.509 Certificate. + public static BatchCertificateFormat Cer { get; } = new BatchCertificateFormat(CerValue); + /// Determines if two values are the same. + public static bool operator ==(BatchCertificateFormat left, BatchCertificateFormat right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchCertificateFormat left, BatchCertificateFormat right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchCertificateFormat(string value) => new BatchCertificateFormat(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchCertificateFormat other && Equals(other); + /// + public bool Equals(BatchCertificateFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs new file mode 100644 index 000000000000..265e7a1d65fd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.Serialization.cs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchCertificateReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. 
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support writing '{format}' format."); + } + + writer.WritePropertyName("thumbprint"u8); + writer.WriteStringValue(Thumbprint); + writer.WritePropertyName("thumbprintAlgorithm"u8); + writer.WriteStringValue(ThumbprintAlgorithm); + if (Optional.IsDefined(StoreLocation)) + { + writer.WritePropertyName("storeLocation"u8); + writer.WriteStringValue(StoreLocation.Value.ToString()); + } + if (Optional.IsDefined(StoreName)) + { + writer.WritePropertyName("storeName"u8); + writer.WriteStringValue(StoreName); + } + if (Optional.IsCollectionDefined(Visibility)) + { + writer.WritePropertyName("visibility"u8); + writer.WriteStartArray(); + foreach (var item in Visibility) + { + writer.WriteStringValue(item.ToString()); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + BatchCertificateReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchCertificateReference(document.RootElement, options); + } + + internal static BatchCertificateReference DeserializeBatchCertificateReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string thumbprint = default; + string thumbprintAlgorithm = default; + BatchCertificateStoreLocation? 
storeLocation = default; + string storeName = default; + IList visibility = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("thumbprint"u8)) + { + thumbprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("thumbprintAlgorithm"u8)) + { + thumbprintAlgorithm = property.Value.GetString(); + continue; + } + if (property.NameEquals("storeLocation"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + storeLocation = new BatchCertificateStoreLocation(property.Value.GetString()); + continue; + } + if (property.NameEquals("storeName"u8)) + { + storeName = property.Value.GetString(); + continue; + } + if (property.NameEquals("visibility"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(new BatchCertificateVisibility(item.GetString())); + } + visibility = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchCertificateReference( + thumbprint, + thumbprintAlgorithm, + storeLocation, + storeName, + visibility ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support writing '{options.Format}' format."); + } + } + + BatchCertificateReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchCertificateReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchCertificateReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchCertificateReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchCertificateReference(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs new file mode 100644 index 000000000000..5255ea9eff66 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateReference.cs @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A reference to a Certificate to be installed on Compute Nodes in a Pool. Warning: This object is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + public partial class BatchCertificateReference + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The thumbprint of the Certificate. + /// The algorithm with which the thumbprint is associated. This must be sha1. + /// or is null. + public BatchCertificateReference(string thumbprint, string thumbprintAlgorithm) + { + Argument.AssertNotNull(thumbprint, nameof(thumbprint)); + Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + Visibility = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The thumbprint of the Certificate. + /// The algorithm with which the thumbprint is associated. This must be sha1. + /// The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). 
Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. + /// Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. + /// Keeps track of any properties unknown to the library. + internal BatchCertificateReference(string thumbprint, string thumbprintAlgorithm, BatchCertificateStoreLocation? storeLocation, string storeName, IList visibility, IDictionary serializedAdditionalRawData) + { + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + StoreLocation = storeLocation; + StoreName = storeName; + Visibility = visibility; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchCertificateReference() + { + } + + /// The thumbprint of the Certificate. + public string Thumbprint { get; set; } + /// The algorithm with which the thumbprint is associated. This must be sha1. + public string ThumbprintAlgorithm { get; set; } + /// The location of the Certificate store on the Compute Node into which to install the Certificate. The default value is currentuser. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + public BatchCertificateStoreLocation? StoreLocation { get; set; } + /// The name of the Certificate store on the Compute Node into which to install the Certificate. This property is applicable only for Pools configured with Windows Compute Nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows Image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but any custom store name can also be used. The default value is My. + public string StoreName { get; set; } + /// Which user Accounts on the Compute Node should have access to the private data of the Certificate. You can specify more than one visibility in this collection. The default is all Accounts. + public IList Visibility { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs new file mode 100644 index 000000000000..59674caeb91c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateState.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchCertificateState enums. + public readonly partial struct BatchCertificateState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchCertificateState(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string ActiveValue = "active"; + private const string DeletingValue = "deleting"; + private const string DeleteFailedValue = "deletefailed"; + + /// The Certificate is available for use in Pools. + public static BatchCertificateState Active { get; } = new BatchCertificateState(ActiveValue); + /// The user has requested that the Certificate be deleted, but the delete operation has not yet completed. You may not reference the Certificate when creating or updating Pools. + public static BatchCertificateState Deleting { get; } = new BatchCertificateState(DeletingValue); + /// The user requested that the Certificate be deleted, but there are Pools that still have references to the Certificate, or it is still installed on one or more Nodes. (The latter can occur if the Certificate has been removed from the Pool, but the Compute Node has not yet restarted. Compute Nodes refresh their Certificates only when they restart.) You may use the cancel Certificate delete operation to cancel the delete, or the delete Certificate operation to retry the delete. + public static BatchCertificateState DeleteFailed { get; } = new BatchCertificateState(DeleteFailedValue); + /// Determines if two values are the same. + public static bool operator ==(BatchCertificateState left, BatchCertificateState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchCertificateState left, BatchCertificateState right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchCertificateState(string value) => new BatchCertificateState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchCertificateState other && Equals(other); + /// + public bool Equals(BatchCertificateState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs new file mode 100644 index 000000000000..191a6833d3f3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateStoreLocation.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchCertificateStoreLocation enums. + public readonly partial struct BatchCertificateStoreLocation : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchCertificateStoreLocation(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string CurrentUserValue = "currentuser"; + private const string LocalMachineValue = "localmachine"; + + /// Certificates should be installed to the CurrentUser Certificate store. + public static BatchCertificateStoreLocation CurrentUser { get; } = new BatchCertificateStoreLocation(CurrentUserValue); + /// Certificates should be installed to the LocalMachine Certificate store. 
+ public static BatchCertificateStoreLocation LocalMachine { get; } = new BatchCertificateStoreLocation(LocalMachineValue); + /// Determines if two values are the same. + public static bool operator ==(BatchCertificateStoreLocation left, BatchCertificateStoreLocation right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchCertificateStoreLocation left, BatchCertificateStoreLocation right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchCertificateStoreLocation(string value) => new BatchCertificateStoreLocation(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchCertificateStoreLocation other && Equals(other); + /// + public bool Equals(BatchCertificateStoreLocation other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs new file mode 100644 index 000000000000..ecd194378a4a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateVisibility.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchCertificateVisibility enums. + public readonly partial struct BatchCertificateVisibility : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchCertificateVisibility(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StartTaskValue = "starttask"; + private const string TaskValue = "task"; + private const string RemoteUserValue = "remoteuser"; + + /// The Certificate should be visible to the user account under which the StartTask is run. Note that if AutoUser Scope is Pool for both the StartTask and a Task, this certificate will be visible to the Task as well. + public static BatchCertificateVisibility StartTask { get; } = new BatchCertificateVisibility(StartTaskValue); + /// The Certificate should be visible to the user accounts under which Job Tasks are run. + public static BatchCertificateVisibility Task { get; } = new BatchCertificateVisibility(TaskValue); + /// The Certificate should be visible to the user accounts under which users remotely access the Compute Node. + public static BatchCertificateVisibility RemoteUser { get; } = new BatchCertificateVisibility(RemoteUserValue); + /// Determines if two values are the same. + public static bool operator ==(BatchCertificateVisibility left, BatchCertificateVisibility right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchCertificateVisibility left, BatchCertificateVisibility right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator BatchCertificateVisibility(string value) => new BatchCertificateVisibility(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchCertificateVisibility other && Equals(other); + /// + public bool Equals(BatchCertificateVisibility other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs index 47672eb0c12f..2021789f8bd3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs @@ -1656,14 +1656,15 @@ public virtual Response RemoveNodes(string poolId, RequestContent content, int? /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will delete the Job even if the corresponding nodes have not fully processed the deletion. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task DeleteJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -1671,7 +1672,7 @@ public virtual async Task DeleteJobAsync(string jobId, int? timeOutInS scope.Start(); try { - using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1699,14 +1700,15 @@ public virtual async Task DeleteJobAsync(string jobId, int? timeOutInS /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will delete the Job even if the corresponding nodes have not fully processed the deletion. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -1714,7 +1716,7 @@ public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, Da scope.Start(); try { - using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2379,6 +2381,7 @@ public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, Da /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. /// The content to send as the request conditions of the request. /// The cancellation token to use. /// is null. @@ -2391,14 +2394,14 @@ public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, Da /// state, they will remain in the active state. Once a Job is terminated, new /// Tasks cannot be added and any remaining active Tasks will not be scheduled. /// - /// - public virtual async Task TerminateJobAsync(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task TerminateJobAsync(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await TerminateJobAsync(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await TerminateJobAsync(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context).ConfigureAwait(false); return response; } @@ -2411,6 +2414,7 @@ public virtual async Task TerminateJobAsync(string jobId, BatchJobTerm /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. /// The content to send as the request conditions of the request. /// The cancellation token to use. /// is null. @@ -2423,14 +2427,14 @@ public virtual async Task TerminateJobAsync(string jobId, BatchJobTerm /// state, they will remain in the active state. Once a Job is terminated, new /// Tasks cannot be added and any remaining active Tasks will not be scheduled. /// - /// - public virtual Response TerminateJob(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response TerminateJob(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = TerminateJob(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = TerminateJob(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); return response; } @@ -2444,7 +2448,7 @@ public virtual Response TerminateJob(string jobId, BatchJobTerminateContent para /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2457,14 +2461,15 @@ public virtual Response TerminateJob(string jobId, BatchJobTerminateContent para /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task TerminateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task TerminateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -2472,7 +2477,7 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte scope.Start(); try { - using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2492,7 +2497,7 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2505,14 +2510,15 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the Job even if the corresponding nodes have not fully processed the termination. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual Response TerminateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response TerminateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -2520,7 +2526,7 @@ public virtual Response TerminateJob(string jobId, RequestContent content, int? scope.Start(); try { - using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2830,40 +2836,84 @@ public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds, Da } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// Creates a Certificate to the specified Account. + /// The Certificate to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + public virtual async Task CreateCertificateAsync(BatchCertificate certificate, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(certificate, nameof(certificate)); + + using RequestContent content = certificate.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateCertificateAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Creates a Certificate to the specified Account. + /// The Certificate to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + public virtual Response CreateCertificate(BatchCertificate certificate, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(certificate, nameof(certificate)); + + using RequestContent content = certificate.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateCertificate(content, timeOutInSeconds, ocpdate, context); + return response; + } + /// - /// [Protocol Method] Deletes a Job Schedule from the specified Account. + /// [Protocol Method] Creates a Certificate to the specified Account. 
/// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job Schedule to delete. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. + /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task CreateCertificateAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateCertificate"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2873,40 +2923,42 @@ public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Deletes a Job Schedule from the specified Account. + /// [Protocol Method] Creates a Certificate to the specified Account. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job Schedule to delete. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. + /// is null. /// Service returned a non-success status code. 
/// The response returned from the service. - /// - public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response CreateCertificate(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateCertificate"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2916,94 +2968,41 @@ public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSe } } - /// Gets information about the specified Job Schedule. - /// The ID of the Job Schedule to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetJobScheduleAsync(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); - return Response.FromValue(BatchJobSchedule.FromResponse(response), response); - } - - /// Gets information about the specified Job Schedule. - /// The ID of the Job Schedule to get. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. - /// The cancellation token to use. - /// is null. - /// is an empty string, and was expected to be non-empty. - /// - public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - - RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetJobSchedule(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); - return Response.FromValue(BatchJobSchedule.FromResponse(response), response); - } - + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Gets information about the specified Job Schedule. + /// [Protocol Method] Cancels a failed deletion of a Certificate from the specified Account. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job Schedule to get. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate being deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CancelCertificateDeletion"); scope.Start(); try { - using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3013,46 +3012,41 @@ public virtual async Task GetJobScheduleAsync(string jobScheduleId, in } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Gets information about the specified Job Schedule. + /// [Protocol Method] Cancels a failed deletion of a Certificate from the specified Account. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job Schedule to get. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate being deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CancelCertificateDeletion"); scope.Start(); try { - using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3062,8 +3056,9 @@ public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSecon } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// [Protocol Method] Deletes a Certificate from the specified Account. /// /// /// @@ -3072,31 +3067,30 @@ public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSecon /// /// /// - /// The ID of the Job Schedule to update. - /// The content to send as the body of the request. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to be deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task DeleteCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); scope.Start(); try { - using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3106,8 +3100,9 @@ public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// [Protocol Method] Deletes a Certificate from the specified Account. /// /// /// @@ -3116,31 +3111,30 @@ public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, /// /// /// - /// The ID of the Job Schedule to update. - /// The content to send as the body of the request. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to be deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response DeleteCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); scope.Start(); try { - using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3150,72 +3144,56 @@ public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent c } } - /// Updates the properties of the specified Job Schedule. - /// The ID of the Job Schedule to update. - /// A Job Schedule with updated properties. + /// Gets information about the specified Certificate. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. + /// An OData $select clause. /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// This fully replaces all the updatable properties of the Job Schedule. For - /// example, if the schedule property is not specified with this request, then the - /// Batch service will remove the existing schedule. Changes to a Job Schedule only - /// impact Jobs created by the schedule after the update has taken place; currently - /// running Jobs are unaffected. - /// - /// - public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); - return response; + Response response = await GetCertificateAsync(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + return Response.FromValue(GetCertificateResponse.FromResponse(response), response); } - /// Updates the properties of the specified Job Schedule. - /// The ID of the Job Schedule to update. - /// A Job Schedule with updated properties. + /// Gets information about the specified Certificate. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. + /// An OData $select clause. /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// This fully replaces all the updatable properties of the Job Schedule. For - /// example, if the schedule property is not specified with this request, then the - /// Batch service will remove the existing schedule. Changes to a Job Schedule only - /// impact Jobs created by the schedule after the update has taken place; currently - /// running Jobs are unaffected. - /// - /// - public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); - return response; + Response response = GetCertificate(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); + return Response.FromValue(GetCertificateResponse.FromResponse(response), response); } /// - /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// [Protocol Method] Gets information about the specified Certificate. /// /// /// @@ -3224,36 +3202,36 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedul /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job Schedule to update. - /// The content to send as the body of the request. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. + /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, RequestContext context) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetCertificate"); scope.Start(); try { - using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3264,7 +3242,7 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId } /// - /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// [Protocol Method] Gets information about the specified Certificate. /// /// /// @@ -3273,36 +3251,36 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job Schedule to update. - /// The content to send as the body of the request. + /// The algorithm used to derive the thumbprint parameter. This must be sha1. + /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. + /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, RequestContext context) { - Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetCertificate"); scope.Start(); try { - using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3314,7 +3292,7 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Disables a Job Schedule. + /// [Protocol Method] Deletes a Job Schedule from the specified Account. /// /// /// @@ -3323,29 +3301,30 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent /// /// /// - /// The ID of the Job Schedule to disable. + /// The ID of the Job Schedule to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will delete the JobSchedule even if the corresponding nodes have not fully processed the deletion. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DisableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3357,7 +3336,7 @@ public virtual async Task DisableJobScheduleAsync(string jobScheduleId // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Disables a Job Schedule. + /// [Protocol Method] Deletes a Job Schedule from the specified Account. /// /// /// @@ -3366,29 +3345,30 @@ public virtual async Task DisableJobScheduleAsync(string jobScheduleId /// /// /// - /// The ID of the Job Schedule to disable. + /// The ID of the Job Schedule to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will delete the JobSchedule even if the corresponding nodes have not fully processed the deletion. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3398,40 +3378,94 @@ public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInS } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Enables a Job Schedule. + /// Gets information about the specified Job Schedule. 
+ /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetJobScheduleAsync(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(BatchJobSchedule.FromResponse(response), response); + } + + /// Gets information about the specified Job Schedule. + /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetJobSchedule(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return Response.FromValue(BatchJobSchedule.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job Schedule to enable. + /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. + /// An OData $expand clause. 
/// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EnableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3441,40 +3475,46 @@ public virtual async Task EnableJobScheduleAsync(string jobScheduleId, } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Enables a Job Schedule. + /// [Protocol Method] Gets information about the specified Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job Schedule to enable. + /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. + /// An OData $expand clause. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3484,9 +3524,8 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSe } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Terminates a Job Schedule. + /// [Protocol Method] Updates the properties of the specified Job Schedule. /// /// /// @@ -3495,7 +3534,8 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSe /// /// /// - /// The ID of the Job Schedule to terminates. + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -3504,20 +3544,21 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSe /// /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task TerminateJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3527,9 +3568,8 @@ public virtual async Task TerminateJobScheduleAsync(string jobSchedule } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Terminates a Job Schedule. 
+ /// [Protocol Method] Updates the properties of the specified Job Schedule. /// /// /// @@ -3538,7 +3578,8 @@ public virtual async Task TerminateJobScheduleAsync(string jobSchedule /// /// /// - /// The ID of the Job Schedule to terminates. + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -3547,20 +3588,21 @@ public virtual async Task TerminateJobScheduleAsync(string jobSchedule /// /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3570,50 +3612,72 @@ public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutI } } - /// Creates a Job Schedule to the specified Account. - /// The Job Schedule to be created. + /// Updates the properties of the specified Job Schedule. + /// The ID of the Job Schedule to update. + /// A Job Schedule with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// is null. - /// - public virtual async Task CreateJobScheduleAsync(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job Schedule. For + /// example, if the schedule property is not specified with this request, then the + /// Batch service will remove the existing schedule. 
Changes to a Job Schedule only + /// impact Jobs created by the schedule after the update has taken place; currently + /// running Jobs are unaffected. + /// + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateJobScheduleAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await ReplaceJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); return response; } - /// Creates a Job Schedule to the specified Account. - /// The Job Schedule to be created. + /// Updates the properties of the specified Job Schedule. + /// The ID of the Job Schedule to update. + /// A Job Schedule with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// is null. - /// - public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job Schedule. For + /// example, if the schedule property is not specified with this request, then the + /// Batch service will remove the existing schedule. Changes to a Job Schedule only + /// impact Jobs created by the schedule after the update has taken place; currently + /// running Jobs are unaffected. + /// + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateJobSchedule(content, timeOutInSeconds, ocpdate, context); + Response response = ReplaceJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); return response; } /// - /// [Protocol Method] Creates a Job Schedule to the specified Account. + /// [Protocol Method] Updates the properties of the specified Job Schedule. /// /// /// @@ -3622,11 +3686,12 @@ public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSched /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// /// + /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// @@ -3634,20 +3699,23 @@ public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSched /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateJobScheduleAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3658,7 +3726,7 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten } /// - /// [Protocol Method] Creates a Job Schedule to the specified Account. + /// [Protocol Method] Updates the properties of the specified Job Schedule. /// /// /// @@ -3667,11 +3735,12 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// + /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// @@ -3679,20 +3748,23 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateJobSchedule(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3702,104 +3774,84 @@ public virtual Response CreateJobSchedule(RequestContent content, int? timeOutIn } } - /// Creates a Task to the specified Job. - /// The ID of the Job to which the Task is to be created. - /// The Task to be created. + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Disables a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to disable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// The maximum lifetime of a Task from addition to completion is 180 days. If a - /// Task has not completed within 180 days of being added it will be terminated by - /// the Batch service and left in whatever state it was in at that time. - /// - /// - public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DisableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(task, nameof(task)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using RequestContent content = task.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateTaskAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); - return response; - } - - /// Creates a Task to the specified Job. 
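For reference, a minimal usage sketch of the new ReplaceJobSchedule convenience overload. Because the call fully replaces every updatable property, the usual pattern is read-modify-write; the endpoint, credential, schedule ID, and the GetJobScheduleAsync accessor are assumptions about the surrounding client surface rather than something this hunk defines.

// Illustrative sketch only (inside an async method); requires Azure, Azure.Identity, Azure.Compute.Batch.
BatchClient client = new BatchClient(
    new Uri("https://<account>.<region>.batch.azure.com"),   // placeholder endpoint
    new DefaultAzureCredential());
BatchJobSchedule schedule = await client.GetJobScheduleAsync("<jobScheduleId>");   // assumed accessor
// ...mutate the updatable properties of 'schedule' here; properties left unspecified are removed by the service...
Response response = await client.ReplaceJobScheduleAsync(
    "<jobScheduleId>",
    schedule,
    requestConditions: new RequestConditions { IfUnmodifiedSince = DateTimeOffset.UtcNow.AddMinutes(-5) });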
- /// The ID of the Job to which the Task is to be created. - /// The Task to be created. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// The maximum lifetime of a Task from addition to completion is 180 days. If a - /// Task has not completed within 180 days of being added it will be terminated by - /// the Batch service and left in whatever state it was in at that time. - /// - /// - public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(task, nameof(task)); - - using RequestContent content = task.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateTask(jobId, content, timeOutInSeconds, ocpdate, context); - return response; + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Creates a Task to the specified Job. + /// [Protocol Method] Disables a Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job to which the Task is to be created. - /// The content to send as the body of the request. + /// The ID of the Job Schedule to disable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateTaskAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); } catch (Exception e) { @@ -3808,46 +3860,41 @@ public virtual async Task CreateTaskAsync(string jobId, RequestContent } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Creates a Task to the specified Job. + /// [Protocol Method] Enables a Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job to which the Task is to be created. - /// The content to send as the body of the request. + /// The ID of the Job Schedule to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateTask(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task EnableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); - return _pipeline.ProcessMessage(message, context); + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) { @@ -3856,126 +3903,41 @@ public virtual Response CreateTask(string jobId, RequestContent content, int? ti } } - /// Adds a collection of Tasks to the specified Job. - /// The ID of the Job to which the Task collection is to be added. - /// The Tasks to be added. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// Note that each Task must have a unique ID. The Batch service may not return the - /// results for each Task in the same order the Tasks were submitted in this - /// request. If the server times out or the connection is closed during the - /// request, the request may have been partially or fully processed, or not at all. - /// In such cases, the user should re-issue the request. Note that it is up to the - /// user to correctly handle failures when re-issuing a request. For example, you - /// should use the same Task IDs during a retry so that if the prior operation - /// succeeded, the retry will not create extra Tasks unexpectedly. If the response - /// contains any Tasks which failed to add, a client can retry the request. In a - /// retry, it is most efficient to resubmit only Tasks that failed to add, and to - /// omit Tasks that were successfully added on the first attempt. The maximum - /// lifetime of a Task from addition to completion is 180 days. If a Task has not - /// completed within 180 days of being added it will be terminated by the Batch - /// service and left in whatever state it was in at that time. - /// - /// - public virtual async Task> CreateTaskCollectionAsync(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(taskCollection, nameof(taskCollection)); - - using RequestContent content = taskCollection.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateTaskCollectionAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); - return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); - } - - /// Adds a collection of Tasks to the specified Job. 
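Disable and enable expose no separate convenience overloads (their parameter lists match the protocol methods), so callers invoke them directly and receive the raw Response. A short sketch, reusing the assumed authenticated 'client' from the earlier sketch; the schedule ID is a placeholder.

// Illustrative sketch only: disable the schedule, then re-enable it later.
// No new Jobs are created from the schedule while it is disabled.
Response disabled = await client.DisableJobScheduleAsync("<jobScheduleId>");
Response enabled = await client.EnableJobScheduleAsync("<jobScheduleId>");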
- /// The ID of the Job to which the Task collection is to be added. - /// The Tasks to be added. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// or is null. - /// is an empty string, and was expected to be non-empty. - /// - /// Note that each Task must have a unique ID. The Batch service may not return the - /// results for each Task in the same order the Tasks were submitted in this - /// request. If the server times out or the connection is closed during the - /// request, the request may have been partially or fully processed, or not at all. - /// In such cases, the user should re-issue the request. Note that it is up to the - /// user to correctly handle failures when re-issuing a request. For example, you - /// should use the same Task IDs during a retry so that if the prior operation - /// succeeded, the retry will not create extra Tasks unexpectedly. If the response - /// contains any Tasks which failed to add, a client can retry the request. In a - /// retry, it is most efficient to resubmit only Tasks that failed to add, and to - /// omit Tasks that were successfully added on the first attempt. The maximum - /// lifetime of a Task from addition to completion is 180 days. If a Task has not - /// completed within 180 days of being added it will be terminated by the Batch - /// service and left in whatever state it was in at that time. - /// - /// - public virtual Response CreateTaskCollection(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(taskCollection, nameof(taskCollection)); - - using RequestContent content = taskCollection.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateTaskCollection(jobId, content, timeOutInSeconds, ocpdate, context); - return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); - } - + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Adds a collection of Tasks to the specified Job. + /// [Protocol Method] Enables a Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job to which the Task collection is to be added. - /// The content to send as the body of the request. + /// The ID of the Job Schedule to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// The content to send as the request conditions of the request. 
/// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateTaskCollectionAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); } catch (Exception e) { @@ -3984,46 +3946,42 @@ public virtual async Task CreateTaskCollectionAsync(string jobId, Requ } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Adds a collection of Tasks to the specified Job. + /// [Protocol Method] Terminates a Job Schedule. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// - /// The ID of the Job to which the Task collection is to be added. - /// The content to send as the body of the request. + /// The ID of the Job Schedule to terminates. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the JobSchedule even if the corresponding nodes have not fully processed the termination. The default value is false. + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// is an empty string, and was expected to be non-empty. + /// is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateTaskCollection(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task TerminateJobScheduleAsync(string jobScheduleId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); - return _pipeline.ProcessMessage(message, context); + using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) { @@ -4034,7 +3992,7 @@ public virtual Response CreateTaskCollection(string jobId, RequestContent conten // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Deletes a Task from the specified Job. + /// [Protocol Method] Terminates a Job Schedule. /// /// /// @@ -4043,32 +4001,31 @@ public virtual Response CreateTaskCollection(string jobId, RequestContent conten /// /// /// - /// The ID of the Job from which to delete the Task. - /// The ID of the Task to delete. + /// The ID of the Job Schedule to terminates. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// If true, the server will terminate the JobSchedule even if the corresponding nodes have not fully processed the termination. The default value is false. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. + /// is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + return _pipeline.ProcessMessage(message, context); } catch (Exception e) { @@ -4077,115 +4034,50 @@ public virtual async Task DeleteTaskAsync(string jobId, string taskId, } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a Task from the specified Job. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The ID of the Job from which to delete the Task. - /// The ID of the Task to delete. + /// Creates a Job Schedule to the specified Account. + /// The Job Schedule to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); - scope.Start(); - try - { - using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } - } - - /// Gets information about the specified Task. - /// The ID of the Job that contains the Task. - /// The ID of the Task to get information about. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// An OData $select clause. - /// An OData $expand clause. 
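TerminateJobSchedule follows the same protocol-only pattern and adds a force flag. A sketch, again reusing the assumed 'client':

// Illustrative sketch only: terminate the schedule even if compute nodes have not
// finished processing the termination (force defaults to false).
Response terminated = await client.TerminateJobScheduleAsync("<jobScheduleId>", force: true);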
- /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// - /// For multi-instance Tasks, information such as affinityId, executionInfo and - /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - /// information about subtasks. - /// - /// - public virtual async Task> GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// is null. + /// + public virtual async Task CreateJobScheduleAsync(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetTaskAsync(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); - return Response.FromValue(BatchTask.FromResponse(response), response); + Response response = await CreateJobScheduleAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; } - /// Gets information about the specified Task. - /// The ID of the Job that contains the Task. - /// The ID of the Task to get information about. + /// Creates a Job Schedule to the specified Account. + /// The Job Schedule to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// - /// For multi-instance Tasks, information such as affinityId, executionInfo and - /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - /// information about subtasks. - /// - /// - public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// is null. + /// + public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetTask(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); - return Response.FromValue(BatchTask.FromResponse(response), response); + Response response = CreateJobSchedule(content, timeOutInSeconds, ocpdate, context); + return response; } /// - /// [Protocol Method] Gets information about the specified Task. + /// [Protocol Method] Creates a Job Schedule to the specified Account. /// /// /// @@ -4194,38 +4086,32 @@ public virtual Response GetTask(string jobId, string taskId, int? tim /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task to get information about. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. + /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task CreateJobScheduleAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4236,7 +4122,7 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in } /// - /// [Protocol Method] Gets information about the specified Task. + /// [Protocol Method] Creates a Job Schedule to the specified Account. 
/// /// /// @@ -4245,38 +4131,32 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task to get information about. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. - /// An OData $expand clause. - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. + /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response CreateJobSchedule(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); scope.Start(); try { - using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4286,62 +4166,66 @@ public virtual Response GetTask(string jobId, string taskId, int? timeOutInSecon } } - /// Updates the properties of the specified Task. - /// The ID of the Job containing the Task. - /// The ID of the Task to update. - /// The Task to update. + /// Creates a Task to the specified Job. + /// The ID of the Job to which the Task is to be created. + /// The Task to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// , or is null. - /// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task ReplaceTaskAsync(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// or is null. 
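The CreateJobSchedule convenience overload accepts a strongly typed BatchJobScheduleCreateContent. The constructor shapes below (an ID plus a schedule and a job specification referencing a pool) are assumptions based on the model names in this package, not something this hunk defines:

// Illustrative sketch only; model constructors and properties are assumed and may differ.
var newSchedule = new BatchJobScheduleCreateContent(
    "<jobScheduleId>",
    new BatchJobScheduleConfiguration { RecurrenceInterval = TimeSpan.FromHours(1) },
    new BatchJobSpecification(new BatchPoolInfo { PoolId = "<poolId>" }));
Response created = await client.CreateJobScheduleAsync(newSchedule);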
+ /// is an empty string, and was expected to be non-empty. + /// + /// The maximum lifetime of a Task from addition to completion is 180 days. If a + /// Task has not completed within 180 days of being added it will be terminated by + /// the Batch service and left in whatever state it was in at that time. + /// + /// + public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNull(task, nameof(task)); using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceTaskAsync(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await CreateTaskAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); return response; } - /// Updates the properties of the specified Task. - /// The ID of the Job containing the Task. - /// The ID of the Task to update. - /// The Task to update. + /// Creates a Task to the specified Job. + /// The ID of the Job to which the Task is to be created. + /// The Task to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// , or is null. - /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// The maximum lifetime of a Task from addition to completion is 180 days. If a + /// Task has not completed within 180 days of being added it will be terminated by + /// the Batch service and left in whatever state it was in at that time. + /// + /// + public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNull(task, nameof(task)); using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceTask(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = CreateTask(jobId, content, timeOutInSeconds, ocpdate, context); return response; } /// - /// [Protocol Method] Updates the properties of the specified Task. + /// [Protocol Method] Creates a Task to the specified Job. /// /// /// @@ -4350,13 +4234,12 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job containing the Task. - /// The ID of the Task to update. + /// The ID of the Job to which the Task is to be created. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// @@ -4364,24 +4247,22 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// or is an empty string, and was expected to be non-empty. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplaceTaskAsync(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task CreateTaskAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); scope.Start(); try { - using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4392,7 +4273,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId } /// - /// [Protocol Method] Updates the properties of the specified Task. + /// [Protocol Method] Creates a Task to the specified Job. /// /// /// @@ -4401,13 +4282,12 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job containing the Task. - /// The ID of the Task to update. + /// The ID of the Job to which the Task is to be created. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// @@ -4415,24 +4295,22 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. 
- /// or is an empty string, and was expected to be non-empty. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplaceTask(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response CreateTask(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); scope.Start(); try { - using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4442,133 +4320,125 @@ public virtual Response ReplaceTask(string jobId, string taskId, RequestContent } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Terminates the specified Task. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The ID of the Job containing the Task. - /// The ID of the Task to terminate. + /// Adds a collection of Tasks to the specified Job. + /// The ID of the Job to which the Task collection is to be added. + /// The Tasks to be added. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual async Task TerminateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); - scope.Start(); - try - { - using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. 
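A sketch of the CreateTask convenience overload; the (id, commandLine) constructor for BatchTaskCreateContent is an assumption. As the remarks note, a Task that has not completed within 180 days of being added is terminated by the service:

// Illustrative sketch only; constructor shape assumed, IDs are placeholders.
var newTask = new BatchTaskCreateContent("task-001", "cmd /c echo hello");
Response added = await client.CreateTaskAsync("<jobId>", newTask);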
+ /// + /// Note that each Task must have a unique ID. The Batch service may not return the + /// results for each Task in the same order the Tasks were submitted in this + /// request. If the server times out or the connection is closed during the + /// request, the request may have been partially or fully processed, or not at all. + /// In such cases, the user should re-issue the request. Note that it is up to the + /// user to correctly handle failures when re-issuing a request. For example, you + /// should use the same Task IDs during a retry so that if the prior operation + /// succeeded, the retry will not create extra Tasks unexpectedly. If the response + /// contains any Tasks which failed to add, a client can retry the request. In a + /// retry, it is most efficient to resubmit only Tasks that failed to add, and to + /// omit Tasks that were successfully added on the first attempt. The maximum + /// lifetime of a Task from addition to completion is 180 days. If a Task has not + /// completed within 180 days of being added it will be terminated by the Batch + /// service and left in whatever state it was in at that time. + /// + /// + public virtual async Task> CreateTaskCollectionAsync(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(taskCollection, nameof(taskCollection)); + + using RequestContent content = taskCollection.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateTaskCollectionAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Terminates the specified Task. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// - /// The ID of the Job containing the Task. - /// The ID of the Task to terminate. + /// Adds a collection of Tasks to the specified Job. + /// The ID of the Job to which the Task collection is to be added. + /// The Tasks to be added. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual Response TerminateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// Note that each Task must have a unique ID. 
The Batch service may not return the + /// results for each Task in the same order the Tasks were submitted in this + /// request. If the server times out or the connection is closed during the + /// request, the request may have been partially or fully processed, or not at all. + /// In such cases, the user should re-issue the request. Note that it is up to the + /// user to correctly handle failures when re-issuing a request. For example, you + /// should use the same Task IDs during a retry so that if the prior operation + /// succeeded, the retry will not create extra Tasks unexpectedly. If the response + /// contains any Tasks which failed to add, a client can retry the request. In a + /// retry, it is most efficient to resubmit only Tasks that failed to add, and to + /// omit Tasks that were successfully added on the first attempt. The maximum + /// lifetime of a Task from addition to completion is 180 days. If a Task has not + /// completed within 180 days of being added it will be terminated by the Batch + /// service and left in whatever state it was in at that time. + /// + /// + public virtual Response CreateTaskCollection(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(taskCollection, nameof(taskCollection)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); - scope.Start(); - try - { - using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); - return _pipeline.ProcessMessage(message, context); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } + using RequestContent content = taskCollection.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateTaskCollection(jobId, content, timeOutInSeconds, ocpdate, context); + return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been - /// exhausted. + /// [Protocol Method] Adds a collection of Tasks to the specified Job. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job containing the Task. - /// The ID of the Task to reactivate. + /// The ID of the Job to which the Task collection is to be added. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. 
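The remarks above boil down to: give every Task a unique ID, and on a retry reuse those IDs and resubmit only the Tasks that failed to add. A sketch of that pattern; the BatchTaskGroup constructor and the shape of the returned BatchTaskAddCollectionResult are assumptions:

// Illustrative sketch only (inside an async method); requires System.Collections.Generic; model shapes assumed.
var tasks = new List<BatchTaskCreateContent>
{
    new BatchTaskCreateContent("task-0", "cmd /c echo 0"),
    new BatchTaskCreateContent("task-1", "cmd /c echo 1"),
};
Response<BatchTaskAddCollectionResult> result =
    await client.CreateTaskCollectionAsync("<jobId>", new BatchTaskGroup(tasks));
// If the result reports Tasks that failed to add, resubmit only those entries,
// reusing the same Task IDs so Tasks that were already added are not created twice.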
- /// or is an empty string, and was expected to be non-empty. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReactivateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task CreateTaskCollectionAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); scope.Start(); try { - using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4578,43 +4448,45 @@ public virtual async Task ReactivateTaskAsync(string jobId, string tas } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been - /// exhausted. + /// [Protocol Method] Adds a collection of Tasks to the specified Job. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job containing the Task. - /// The ID of the Task to reactivate. + /// The ID of the Job to which the Task collection is to be added. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// or is null. - /// or is an empty string, and was expected to be non-empty. + /// or is null. + /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReactivateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response CreateTaskCollection(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); scope.Start(); try { - using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4626,7 +4498,7 @@ public virtual Response ReactivateTask(string jobId, string taskId, int? timeOut // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran. + /// [Protocol Method] Deletes a Task from the specified Job. /// /// /// @@ -4635,38 +4507,31 @@ public virtual Response ReactivateTask(string jobId, string taskId, int? timeOut /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Job from which to delete the Task. + /// The ID of the Task to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// Whether to delete children of a directory. If the filePath parameter represents - /// a directory instead of a file, you can set recursive to true to delete the - /// directory and all of the files and subdirectories in it. If recursive is false - /// then the directory must be empty or deletion will fail. - /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + /// + public virtual async Task DeleteTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); scope.Start(); try { - using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4678,7 +4543,7 @@ public virtual async Task DeleteTaskFileAsync(string jobId, string tas // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran. + /// [Protocol Method] Deletes a Task from the specified Job. /// /// /// @@ -4687,38 +4552,31 @@ public virtual async Task DeleteTaskFileAsync(string jobId, string tas /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Job from which to delete the Task. + /// The ID of the Task to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// Whether to delete children of a directory. If the filePath parameter represents - /// a directory instead of a file, you can set recursive to true to delete the - /// directory and all of the files and subdirectories in it. If recursive is false - /// then the directory must be empty or deletion will fail. - /// + /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + /// + public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); scope.Start(); try { - using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4728,68 +4586,70 @@ public virtual Response DeleteTaskFile(string jobId, string taskId, string fileP } } - /// Returns the content of the specified Task file. + /// Gets information about the specified Task. /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// The byte range to be retrieved. The default is to retrieve the entire file. The - /// format is bytes=startRange-endRange. - /// + /// An OData $select clause. + /// An OData $expand clause. /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// For multi-instance Tasks, information such as affinityId, executionInfo and + /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + /// information about subtasks. + /// + /// + public virtual async Task> GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetTaskFileAsync(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); - return Response.FromValue(response.Content, response); + Response response = await GetTaskAsync(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(BatchTask.FromResponse(response), response); } - /// Returns the content of the specified Task file. 
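A minimal sketch of the new GetTask/DeleteTask surface, assuming the select and expand parameters take OData field names as strings and that BatchTask exposes Id and State properties:

using System;
using System.Threading.Tasks;
using Azure;
using Azure.Compute.Batch;

public static class TaskLookupSample
{
    public static async Task InspectAndDeleteAsync(BatchClient client, string jobId, string taskId)
    {
        // Trim the payload with an OData $select clause (field names are illustrative).
        Response<BatchTask> task = await client.GetTaskAsync(
            jobId, taskId, select: new[] { "id", "state", "executionInfo" });
        Console.WriteLine($"{task.Value.Id}: {task.Value.State}"); // assumed BatchTask properties

        // Delete through the protocol method; date-based request conditions are optional.
        Response deleted = await client.DeleteTaskAsync(jobId, taskId,
            requestConditions: new RequestConditions { IfUnmodifiedSince = DateTimeOffset.UtcNow });
        Console.WriteLine($"Delete returned status {deleted.Status}");
    }
}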
+ /// Gets information about the specified Task. /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// The byte range to be retrieved. The default is to retrieve the entire file. The - /// format is bytes=startRange-endRange. - /// + /// An OData $select clause. + /// An OData $expand clause. /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - - RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetTaskFile(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); - return Response.FromValue(response.Content, response); + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// For multi-instance Tasks, information such as affinityId, executionInfo and + /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + /// information about subtasks. + /// + /// + public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetTask(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return Response.FromValue(BatchTask.FromResponse(response), response); } /// - /// [Protocol Method] Returns the content of the specified Task file. + /// [Protocol Method] Gets information about the specified Task. /// /// /// @@ -4798,51 +4658,38 @@ public virtual Response GetTaskFile(string jobId, string taskId, str /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// The byte range to be retrieved. The default is to retrieve the entire file. The - /// format is bytes=startRange-endRange. - /// + /// An OData $select clause. + /// An OData $expand clause. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - - if (requestConditions?.IfMatch is not null) - { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); - } - if (requestConditions?.IfNoneMatch is not null) - { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); - } - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); scope.Start(); try { - using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4853,7 +4700,7 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId } /// - /// [Protocol Method] Returns the content of the specified Task file. + /// [Protocol Method] Gets information about the specified Task. /// /// /// @@ -4862,51 +4709,38 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// - /// The byte range to be retrieved. The default is to retrieve the entire file. 
The - /// format is bytes=startRange-endRange. - /// + /// An OData $select clause. + /// An OData $expand clause. /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - - if (requestConditions?.IfMatch is not null) - { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); - } - if (requestConditions?.IfNoneMatch is not null) - { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); - } - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); scope.Start(); try { - using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4916,20 +4750,78 @@ public virtual Response GetTaskFile(string jobId, string taskId, string filePath } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// Updates the properties of the specified Task. + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The Task to update. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplaceTaskAsync(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Updates the properties of the specified Task. + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The Task to update. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplaceTask(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + /// - /// [Protocol Method] Gets the properties of the specified Task file. + /// [Protocol Method] Updates the properties of the specified Task. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -4938,31 +4830,74 @@ public virtual Response GetTaskFile(string jobId, string taskId, string filePath /// /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
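A hedged get-modify-replace sketch for ReplaceTask; the BatchTaskConstraints model and a settable Constraints property are assumptions for illustration, and the date-based request condition stands in for whatever concurrency guard the caller actually needs:

using System;
using System.Threading.Tasks;
using Azure;
using Azure.Compute.Batch;

public static class ReplaceTaskSample
{
    // Fetch a Task, adjust it locally, and write it back guarded by a request
    // condition so a concurrent change is not silently overwritten.
    public static async Task BumpRetryLimitAsync(BatchClient client, string jobId, string taskId)
    {
        Response<BatchTask> current = await client.GetTaskAsync(jobId, taskId);
        BatchTask task = current.Value;

        // Hypothetical mutation: BatchTaskConstraints and a settable Constraints
        // property are assumptions; substitute the properties your scenario needs.
        task.Constraints = new BatchTaskConstraints { MaxTaskRetryCount = 3 };

        Response response = await client.ReplaceTaskAsync(jobId, taskId, task,
            requestConditions: new RequestConditions { IfUnmodifiedSince = DateTimeOffset.UtcNow });
        Console.WriteLine($"Replace returned status {response.Status}");
    }
}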
- internal virtual async Task GetTaskFilePropertiesInternalAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + Argument.AssertNotNull(content, nameof(content)); - if (requestConditions?.IfMatch is not null) + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); + scope.Start(); + try { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } - if (requestConditions?.IfNoneMatch is not null) + catch (Exception e) { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + scope.Failed(e); + throw; } + } - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + /// + /// [Protocol Method] Updates the properties of the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceTask(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); scope.Start(); try { - using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); } catch (Exception e) { @@ -4973,7 +4908,7 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Gets the properties of the specified Task file. + /// [Protocol Method] Terminates the specified Task. /// /// /// @@ -4982,9 +4917,8 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string /// /// /// - /// The ID of the Job that contains the Task. - /// The ID of the Task whose file you want to retrieve. - /// The path to the Task file that you want to get the content of. + /// The ID of the Job containing the Task. + /// The ID of the Task to terminate. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -4993,30 +4927,984 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string /// /// The content to send as the request conditions of the request. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual Response GetTaskFilePropertiesInternal(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task TerminateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); - if (requestConditions?.IfMatch is not null) + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); + scope.Start(); + try { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } - if (requestConditions?.IfNoneMatch is not null) + catch (Exception e) { - throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + scope.Failed(e); + throw; } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Terminates the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to terminate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response TerminateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been + /// exhausted. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to reactivate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
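Since TerminateTask and ReactivateTask ship only as protocol methods (their convenience overloads are omitted, as noted above), a usage sketch looks like the following; ErrorOptions.NoThrow is used so the status code can be inspected without an exception:

using System;
using System.Threading.Tasks;
using Azure;
using Azure.Compute.Batch;

public static class TaskLifecycleSample
{
    // TerminateTask and ReactivateTask only have protocol methods, so failures
    // surface through the raw Response; NoThrow lets us inspect the status code.
    public static async Task TerminateThenReactivateAsync(BatchClient client, string jobId, string taskId)
    {
        RequestContext context = new RequestContext { ErrorOptions = ErrorOptions.NoThrow };

        Response terminate = await client.TerminateTaskAsync(jobId, taskId, context: context);
        Console.WriteLine($"Terminate returned status {terminate.Status}");

        if (!terminate.IsError)
        {
            // Allow the Task to run again even if its retry count was exhausted.
            Response reactivate = await client.ReactivateTaskAsync(jobId, taskId, context: context);
            Console.WriteLine($"Reactivate returned status {reactivate.Status}");
        }
    }
}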
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReactivateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been + /// exhausted. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to reactivate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReactivateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran. 
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// Whether to delete children of a directory. If the filePath parameter represents + /// a directory instead of a file, you can set recursive to true to delete the + /// directory and all of the files and subdirectories in it. If recursive is false + /// then the directory must be empty or deletion will fail. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// Whether to delete children of a directory. If the filePath parameter represents + /// a directory instead of a file, you can set recursive to true to delete the + /// directory and all of the files and subdirectories in it. If recursive is false + /// then the directory must be empty or deletion will fail. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. 
+ /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeleteTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns the content of the specified Task file. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetTaskFileAsync(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(response.Content, response); + } + + /// Returns the content of the specified Task file. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. 
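A sketch combining the file operations above: download a byte range of a Task file and recursively delete a directory. The file paths are illustrative, and note that these operations accept only date-based request conditions (the generated guards reject If-Match and If-None-Match):

using System;
using System.Threading.Tasks;
using Azure;
using Azure.Compute.Batch;

public static class TaskFileSample
{
    public static async Task ReadAndCleanUpAsync(BatchClient client, string jobId, string taskId)
    {
        // Download only the first 1024 bytes of the Task's stdout (path is illustrative).
        Response<BinaryData> head = await client.GetTaskFileAsync(
            jobId, taskId, "stdout.txt", ocpRange: "bytes=0-1023");
        Console.WriteLine(head.Value.ToString());

        // The file operations accept only date-based request conditions; the
        // generated guards throw if IfMatch/IfNoneMatch are supplied.

        // Recursively delete a directory left behind by the Task ("wd" is illustrative).
        await client.DeleteTaskFileAsync(jobId, taskId, "wd", recursive: true);
    }
}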
+ /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetTaskFile(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return Response.FromValue(response.Content, response); + } + + /// + /// [Protocol Method] Returns the content of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns the content of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + internal virtual async Task GetTaskFilePropertiesInternalAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual Response GetTaskFilePropertiesInternal(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Adds a user Account to the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The options to use for creating the user. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can add a user Account to a Compute Node only when it is in the idle or + /// running state. + /// + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(user, nameof(user)); + + using RequestContent content = user.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateNodeUserAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Adds a user Account to the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The options to use for creating the user. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can add a user Account to a Compute Node only when it is in the idle or + /// running state. + /// + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUserCreateContent user, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(user, nameof(user)); + + using RequestContent content = user.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateNodeUser(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. 
+ /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a user Account from the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to delete a user Account. + /// The name of the user Account to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteNodeUserAsync(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a user Account from the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to delete a user Account. + /// The name of the user Account to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeleteNodeUser(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Updates the password and expiration time of a user Account on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to update a user Account. + /// The name of the user Account to update. + /// The options to use for updating the user. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + /// This operation replaces of all the updatable properties of the Account. For + /// example, if the expiryTime element is not specified, the current value is + /// replaced with the default value, not left unmodified. You can update a user + /// Account on a Compute Node only when it is in the idle or running state. + /// + /// + public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplaceNodeUserAsync(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Updates the password and expiration time of a user Account on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to update a user Account. + /// The name of the user Account to update. + /// The options to use for updating the user. 
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + /// This operation replaces of all the updatable properties of the Account. For + /// example, if the expiryTime element is not specified, the current value is + /// replaced with the default value, not left unmodified. You can update a user + /// Account on a Compute Node only when it is in the idle or running state. + /// + /// + public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplaceNodeUser(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to update a user Account. + /// The name of the user Account to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to update a user Account. + /// The name of the user Account to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); scope.Start(); try { - using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5026,68 +5914,56 @@ internal virtual Response GetTaskFilePropertiesInternal(string jobId, string tas } } - /// Adds a user Account to the specified Compute Node. + /// Gets information about the specified Compute Node. /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to create a user Account. - /// The options to use for creating the user. + /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. /// The cancellation token to use. - /// , or is null. + /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - /// You can add a user Account to a Compute Node only when it is in the idle or - /// running state. - /// - /// - public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(user, nameof(user)); - using RequestContent content = user.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateNodeUserAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); - return response; + Response response = await GetNodeAsync(poolId, nodeId, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + return Response.FromValue(BatchNode.FromResponse(response), response); } - /// Adds a user Account to the specified Compute Node. + /// Gets information about the specified Compute Node. /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to create a user Account. - /// The options to use for creating the user. + /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. /// The cancellation token to use. - /// , or is null. + /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - /// You can add a user Account to a Compute Node only when it is in the idle or - /// running state. - /// - /// - public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(user, nameof(user)); - using RequestContent content = user.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateNodeUser(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); - return response; + Response response = GetNode(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + return Response.FromValue(BatchNode.FromResponse(response), response); } /// - /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// [Protocol Method] Gets information about the specified Compute Node. /// /// /// @@ -5096,37 +5972,36 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUs /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to create a user Account. - /// The content to send as the body of the request. + /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. + /// or is null. /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); scope.Start(); try { - using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5137,7 +6012,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no } /// - /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// [Protocol Method] Gets information about the specified Compute Node. 
/// /// /// @@ -5146,37 +6021,36 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to create a user Account. - /// The content to send as the body of the request. + /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// + /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. + /// or is null. /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateNodeUser(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); scope.Start(); try { - using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5186,66 +6060,76 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, RequestCont } } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method - /// - /// [Protocol Method] Deletes a user Account from the specified Compute Node. - /// - /// - /// - /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. - /// - /// - /// - /// + /// Restarts the specified Compute Node. /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to delete a user Account. - /// The name of the user Account to delete. + /// The ID of the Compute Node that you want to restart. + /// The options to use for rebooting the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. 
- /// , or is an empty string, and was expected to be non-empty. - /// Service returned a non-success status code. - /// The response returned from the service. - /// - public virtual async Task DeleteNodeUserAsync(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// You can restart a Compute Node only if it is in an idle or running state. + /// + public virtual async Task RebootNodeAsync(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser"); - scope.Start(); - try - { - using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); - return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); - } - catch (Exception e) - { - scope.Failed(e); - throw; - } + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await RebootNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Restarts the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to restart. + /// The options to use for rebooting the Compute Node. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// You can restart a Compute Node only if it is in an idle or running state. + /// + public virtual Response RebootNode(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = RebootNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; } - // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Deletes a user Account from the specified Compute Node. + /// [Protocol Method] Restarts the specified Compute Node. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
+ /// + /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to delete a user Account. - /// The name of the user Account to delete. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -5253,23 +6137,22 @@ public virtual async Task DeleteNodeUserAsync(string poolId, string no /// directly. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteNodeUser(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task RebootNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); scope.Start(); try { - using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); - return _pipeline.ProcessMessage(message, context); + using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) { @@ -5278,93 +6161,68 @@ public virtual Response DeleteNodeUser(string poolId, string nodeId, string user } } - /// Updates the password and expiration time of a user Account on the specified Compute Node. - /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to update a user Account. - /// The name of the user Account to update. - /// The options to use for updating the user. - /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// - /// The time the request was issued. Client libraries typically set this to the - /// current system clock time; set it explicitly if you are calling the REST API - /// directly. - /// - /// The cancellation token to use. - /// , , or is null. - /// , or is an empty string, and was expected to be non-empty. - /// - /// This operation replaces of all the updatable properties of the Account. For - /// example, if the expiryTime element is not specified, the current value is - /// replaced with the default value, not left unmodified. You can update a user - /// Account on a Compute Node only when it is in the idle or running state. 
- /// - /// - public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) - { - Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); - - using RequestContent content0 = content.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceNodeUserAsync(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); - return response; - } - - /// Updates the password and expiration time of a user Account on the specified Compute Node. + /// + /// [Protocol Method] Restarts the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to update a user Account. - /// The name of the user Account to update. - /// The options to use for updating the user. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// The cancellation token to use. - /// , , or is null. - /// , or is an empty string, and was expected to be non-empty. - /// - /// This operation replaces of all the updatable properties of the Account. For - /// example, if the expiryTime element is not specified, the current value is - /// replaced with the default value, not left unmodified. You can update a user - /// Account on a Compute Node only when it is in the idle or running state. - /// - /// - public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response RebootNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); - using RequestContent content0 = content.ToRequestContent(); - RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceNodeUser(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context); - return response; + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + scope.Start(); + try + { + using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node. + /// [Protocol Method] Starts the specified Compute Node. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to update a user Account. - /// The name of the user Account to update. - /// The content to send as the body of the request. + /// The ID of the Compute Node that you want to restart. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -5372,23 +6230,21 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// directly. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task StartNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNode"); scope.Start(); try { - using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateStartNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5398,25 +6254,19 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n } } + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method /// - /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node. + /// [Protocol Method] Starts the specified Compute Node. /// /// /// /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. /// /// - /// - /// - /// Please try the simpler convenience overload with strongly typed models first. - /// - /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the machine on which you want to update a user Account. - /// The name of the user Account to update. - /// The content to send as the body of the request. + /// The ID of the Compute Node that you want to restart. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -5424,23 +6274,21 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// directly. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// , , or is null. - /// , or is an empty string, and was expected to be non-empty. + /// or is null. + /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response StartNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNode"); scope.Start(); try { - using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateStartNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5450,56 +6298,68 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use } } - /// Gets information about the specified Compute Node. + /// Reinstalls the operating system on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. - /// The ID of the Compute Node that you want to get information about. + /// The ID of the Compute Node that you want to restart. + /// The options to use for reimaging the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + /// You can reinstall the operating system on a Compute Node only if it is in an + /// idle or running state. This API can be invoked only on Pools created with the + /// cloud service configuration property. + /// + /// + public virtual async Task ReimageNodeAsync(string poolId, string nodeId, BatchNodeReimageContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetNodeAsync(poolId, nodeId, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); - return Response.FromValue(BatchNode.FromResponse(response), response); + Response response = await ReimageNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; } - /// Gets information about the specified Compute Node. + /// Reinstalls the operating system on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. - /// The ID of the Compute Node that you want to get information about. + /// The ID of the Compute Node that you want to restart. + /// The options to use for reimaging the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
/// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + /// You can reinstall the operating system on a Compute Node only if it is in an + /// idle or running state. This API can be invoked only on Pools created with the + /// cloud service configuration property. + /// + /// + public virtual Response ReimageNode(string poolId, string nodeId, BatchNodeReimageContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetNode(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); - return Response.FromValue(BatchNode.FromResponse(response), response); + Response response = ReimageNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; } /// - /// [Protocol Method] Gets information about the specified Compute Node. + /// [Protocol Method] Reinstalls the operating system on the specified Compute Node. /// /// /// @@ -5508,36 +6368,36 @@ public virtual Response GetNode(string poolId, string nodeId, int? ti /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the Compute Node that you want to get information about. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual async Task ReimageNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNode"); scope.Start(); try { - using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateReimageNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5548,7 +6408,7 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i } /// - /// [Protocol Method] Gets information about the specified Compute Node. + /// [Protocol Method] Reinstalls the operating system on the specified Compute Node. /// /// /// @@ -5557,36 +6417,36 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool that contains the Compute Node. - /// The ID of the Compute Node that you want to get information about. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// - /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// or is null. /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual Response ReimageNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNode"); scope.Start(); try { - using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateReimageNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5596,10 +6456,10 @@ public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeco } } - /// Restarts the specified Compute Node. + /// Deallocates the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for rebooting the Compute Node. + /// The options to use for deallocating the Compute Node. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -5609,23 +6469,23 @@ public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeco /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// You can restart a Compute Node only if it is in an idle or running state. - /// - public virtual async Task RebootNodeAsync(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// You can deallocate a Compute Node only if it is in an idle or running state. + /// + public virtual async Task DeallocateNodeAsync(string poolId, string nodeId, BatchNodeDeallocateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await RebootNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await DeallocateNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); return response; } - /// Restarts the specified Compute Node. + /// Deallocates the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for rebooting the Compute Node. + /// The options to use for deallocating the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -5635,21 +6495,21 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// You can restart a Compute Node only if it is in an idle or running state. - /// - public virtual Response RebootNode(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// You can deallocate a Compute Node only if it is in an idle or running state. + /// + public virtual Response DeallocateNode(string poolId, string nodeId, BatchNodeDeallocateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); using RequestContent content = parameters?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = RebootNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = DeallocateNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); return response; } /// - /// [Protocol Method] Restarts the specified Compute Node. + /// [Protocol Method] Deallocates the specified Compute Node. /// /// /// @@ -5658,7 +6518,7 @@ public virtual Response RebootNode(string poolId, string nodeId, BatchNodeReboot /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5677,17 +6537,17 @@ public virtual Response RebootNode(string poolId, string nodeId, BatchNodeReboot /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task RebootNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task DeallocateNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNode"); scope.Start(); try { - using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeallocateNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5698,7 +6558,7 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId } /// - /// [Protocol Method] Restarts the specified Compute Node. + /// [Protocol Method] Deallocates the specified Compute Node. /// /// /// @@ -5707,7 +6567,7 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5726,17 +6586,17 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response RebootNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response DeallocateNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNode"); scope.Start(); try { - using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeallocateNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6003,9 +6863,8 @@ public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? /// or is null. /// or is an empty string, and was expected to be non-empty. /// - /// Before you can remotely login to a Compute Node using the remote login - /// settings, you must create a user Account on the Compute Node. This API can be - /// invoked only on Pools created with the virtual machine configuration property. + /// Before you can remotely login to a Compute Node using the remote login settings, + /// you must create a user Account on the Compute Node. /// /// public virtual async Task> GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) @@ -6031,9 +6890,8 @@ public virtual async Task> GetNodeRemoteL /// or is null. /// or is an empty string, and was expected to be non-empty. /// - /// Before you can remotely login to a Compute Node using the remote login - /// settings, you must create a user Account on the Compute Node. This API can be - /// invoked only on Pools created with the virtual machine configuration property. + /// Before you can remotely login to a Compute Node using the remote login settings, + /// you must create a user Account on the Compute Node. /// /// public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) @@ -7036,7 +7894,7 @@ public virtual Pageable GetApplications(int? timeOutInSeconds, DateT /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. /// /// The cancellation token to use. /// @@ -7082,7 +7940,7 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. /// /// The cancellation token to use. /// @@ -7140,7 +7998,7 @@ public virtual Pageable GetPoolUsageMetrics(int? timeOutI /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. 
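For illustration, a minimal usage sketch of the new deallocate operation through the convenience overload shown above. It assumes a `batchClient` instance that was constructed and authenticated elsewhere, and leaves the optional `BatchNodeDeallocateContent` parameter at its default of null so the service's default deallocate behavior applies.

    using System;
    using Azure;
    using Azure.Compute.Batch;

    // Sketch only: batchClient is assumed to be an existing, authenticated BatchClient.
    // Deallocate node "node1" in pool "pool1"; the optional deallocate options are omitted.
    Response response = await batchClient.DeallocateNodeAsync("pool1", "node1");
    Console.WriteLine($"Deallocate request accepted with status {response.Status}");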
/// Service returned a non-success status code. @@ -7191,7 +8049,7 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutIn /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. @@ -7204,7 +8062,7 @@ public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds, D return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } - /// Lists all of the Pools in the specified Account. + /// Lists all of the Pools which be mounted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -7217,7 +8075,7 @@ public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds, D /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. /// /// An OData $select clause. /// An OData $expand clause. @@ -7231,7 +8089,7 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds = nu return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } - /// Lists all of the Pools in the specified Account. + /// Lists all of the Pools which be mounted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. Client libraries typically set this to the @@ -7244,7 +8102,7 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds = nu /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. /// /// An OData $select clause. /// An OData $expand clause. @@ -7259,7 +8117,7 @@ public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTi } /// - /// [Protocol Method] Lists all of the Pools in the specified Account. + /// [Protocol Method] Lists all of the Pools which be mounted. /// /// /// @@ -7285,7 +8143,7 @@ public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTi /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. /// /// An OData $select clause. /// An OData $expand clause. 
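A hedged sketch of the pool-listing convenience overload documented above. The `$filter` expression, the `$select` field names, and the pre-built `batchClient` are illustrative assumptions; the parameter names `filter` and `select` follow the signatures in this patch.

    using System;
    using Azure.Compute.Batch;

    // Sketch only: enumerate pools matching an OData filter, selecting a few fields.
    await foreach (BatchPool pool in batchClient.GetPoolsAsync(
        filter: "state eq 'active'",
        select: new[] { "id", "state" }))
    {
        Console.WriteLine($"{pool.Id}: {pool.State}");
    }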
@@ -7301,7 +8159,7 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds, Da } /// - /// [Protocol Method] Lists all of the Pools in the specified Account. + /// [Protocol Method] Lists all of the Pools which be mounted. /// /// /// @@ -7327,7 +8185,7 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds, Da /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-pools. /// /// An OData $select clause. /// An OData $expand clause. @@ -7355,7 +8213,7 @@ public virtual Pageable GetPools(int? timeOutInSeconds, DateTimeOffs /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. /// @@ -7380,7 +8238,7 @@ public virtual AsyncPageable GetSupportedImagesAsync(int? t /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. /// @@ -7419,7 +8277,7 @@ public virtual Pageable GetSupportedImages(int? timeOutInSe /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. @@ -7459,7 +8317,7 @@ public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInS /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. @@ -7489,7 +8347,7 @@ public virtual Pageable GetSupportedImages(int? timeOutInSeconds, Da /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. /// @@ -7518,7 +8376,7 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? ti /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. /// @@ -7559,7 +8417,7 @@ public virtual Pageable GetPoolNodeCounts(int? 
timeOutInSec /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. @@ -7601,7 +8459,7 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSe /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. @@ -7627,7 +8485,7 @@ public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds, Dat /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. /// /// An OData $select clause. /// An OData $expand clause. @@ -7654,7 +8512,7 @@ public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds = null /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. /// /// An OData $select clause. /// An OData $expand clause. @@ -7695,7 +8553,7 @@ public virtual Pageable GetJobs(int? timeOutInSeconds = null, DateTime /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. /// /// An OData $select clause. /// An OData $expand clause. @@ -7737,7 +8595,7 @@ public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds, Dat /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs. /// /// An OData $select clause. /// An OData $expand clause. @@ -7766,7 +8624,7 @@ public virtual Pageable GetJobs(int? timeOutInSeconds, DateTimeOffse /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. /// /// An OData $select clause. /// An OData $expand clause. @@ -7798,7 +8656,7 @@ public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobSched /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. 
+ /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. /// /// An OData $select clause. /// An OData $expand clause. @@ -7844,7 +8702,7 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. /// /// An OData $select clause. /// An OData $expand clause. @@ -7891,7 +8749,7 @@ public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobSch /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. /// /// An OData $select clause. /// An OData $expand clause. @@ -7927,7 +8785,7 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, i /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. /// /// An OData $select clause. /// The cancellation token to use. @@ -7969,7 +8827,7 @@ public virtual AsyncPageable GetJobPrep /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. /// /// An OData $select clause. /// The cancellation token to use. @@ -8023,27 +8881,166 @@ public virtual Pageable GetJobPreparati /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the execution status of the Job Preparation and Job Release Task for the + /// specified Job across the Compute Nodes where the Job has run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. /// /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// is null. /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// Lists all of the Certificates that have been added to the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. 
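A sketch of consuming the protocol overload shown just above, which yields items as raw `BinaryData` rather than typed models. The job ID and the JSON property read here are illustrative assumptions; the argument order matches the protocol signature in this patch.

    using System;
    using System.Text.Json;
    using Azure;

    // Sketch only: the protocol overload returns BinaryData items; parse each as JSON.
    await foreach (BinaryData item in batchClient.GetJobPreparationAndReleaseTaskStatusesAsync(
        "job-1", null, null, null, null, null, new RequestContext()))
    {
        using JsonDocument doc = JsonDocument.Parse(item.ToString());
        if (doc.RootElement.TryGetProperty("nodeId", out JsonElement nodeId))
        {
            Console.WriteLine(nodeId.GetString());
        }
    }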
+ /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// + public virtual AsyncPageable GetCertificatesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); + } + + /// Lists all of the Certificates that have been added to the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// + public virtual Pageable GetCertificates(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Certificates that have been added to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetCertificatesAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } /// - /// [Protocol Method] Lists the execution status of the Job Preparation and Job Release Task for the - /// specified Job across the Compute Nodes where the Job has run. + /// [Protocol Method] Lists all of the Certificates that have been added to the specified Account. /// /// /// @@ -8052,12 +9049,11 @@ public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatuses /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// - /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". /// /// The time the request was issued. 
Client libraries typically set this to the @@ -8070,22 +9066,18 @@ public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatuses /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-certificates. /// /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. - /// is null. - /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetCertificates(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) { - Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } /// Lists all of the Job Schedules in the specified Account. @@ -8101,7 +9093,7 @@ public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(stri /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. /// /// An OData $select clause. /// An OData $expand clause. @@ -8128,7 +9120,7 @@ public virtual AsyncPageable GetJobSchedulesAsync(int? timeOut /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. /// /// An OData $select clause. /// An OData $expand clause. @@ -8169,7 +9161,7 @@ public virtual Pageable GetJobSchedules(int? 
timeOutInSeconds /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. /// /// An OData $select clause. /// An OData $expand clause. @@ -8211,7 +9203,7 @@ public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeco /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. /// /// An OData $select clause. /// An OData $expand clause. @@ -8240,7 +9232,7 @@ public virtual Pageable GetJobSchedules(int? timeOutInSeconds, DateT /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. /// /// An OData $select clause. /// An OData $expand clause. @@ -8277,7 +9269,7 @@ public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOut /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. /// /// An OData $select clause. /// An OData $expand clause. @@ -8328,7 +9320,7 @@ public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. /// /// An OData $select clause. /// An OData $expand clause. @@ -8375,7 +9367,7 @@ public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOu /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-tasks. /// /// An OData $select clause. /// An OData $expand clause. @@ -8549,7 +9541,7 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, int /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. /// /// /// Whether to list children of the Task directory. This parameter can be used in @@ -8585,7 +9577,7 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, stri /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. /// /// /// Whether to list children of the Task directory. 
This parameter can be used in @@ -8635,7 +9627,7 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. /// /// /// Whether to list children of the Task directory. This parameter can be used in @@ -8686,7 +9678,7 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, string /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-task-files. /// /// /// Whether to list children of the Task directory. This parameter can be used in @@ -8722,7 +9714,7 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, in /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. /// /// An OData $select clause. /// The cancellation token to use. @@ -8753,7 +9745,7 @@ public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOu /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. /// /// An OData $select clause. /// The cancellation token to use. @@ -8798,7 +9790,7 @@ public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. /// /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. @@ -8844,7 +9836,7 @@ public virtual AsyncPageable GetNodesAsync(string poolId, int? timeO /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. /// /// An OData $select clause. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. @@ -9023,7 +10015,7 @@ public virtual Pageable GetNodeExtensions(string poolId, string node /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. /// /// Whether to list children of a directory. /// The cancellation token to use. @@ -9056,7 +10048,7 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, str /// /// /// An OData $filter clause. 
For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. /// /// Whether to list children of a directory. /// The cancellation token to use. @@ -9103,7 +10095,7 @@ public virtual Pageable GetNodeFiles(string poolId, string nodeId /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. /// /// Whether to list children of a directory. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. @@ -9151,7 +10143,7 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, string /// /// /// An OData $filter clause. For more information on constructing this filter, see - /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. /// /// Whether to list children of a directory. /// The request context, which can override default behaviors of the client pipeline on a per-call basis. @@ -9727,7 +10719,7 @@ internal HttpMessage CreateGetPoolNodeCountsRequest(int? timeOutInSeconds, DateT return message; } - internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -9741,6 +10733,10 @@ internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, { uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); } + if (force != null) + { + uri.AppendQuery("force", force.Value, true); + } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); @@ -9917,7 +10913,7 @@ internal HttpMessage CreateEnableJobRequest(string jobId, int? timeOutInSeconds, return message; } - internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -9932,6 +10928,10 @@ internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent cont { uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); } + if (force != null) + { + uri.AppendQuery("force", force.Value, true); + } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); @@ -10121,6 +11121,156 @@ internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, int? 
timeOutInS return message; } + internal HttpMessage CreateCreateCertificateRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/certificates", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetCertificatesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/certificates", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateCancelCertificateDeletionRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/certificates(thumbprintAlgorithm=", false); + uri.AppendPath(thumbprintAlgorithm, true); + uri.AppendPath(",thumbprint=", false); + uri.AppendPath(thumbprint, true); + uri.AppendPath(")/canceldelete", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateDeleteCertificateRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/certificates(thumbprintAlgorithm=", false); + uri.AppendPath(thumbprintAlgorithm, true); + uri.AppendPath(",thumbprint=", false); + uri.AppendPath(thumbprint, true); + uri.AppendPath(")", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/certificates(thumbprintAlgorithm=", false); + uri.AppendPath(thumbprintAlgorithm, true); + uri.AppendPath(",thumbprint=", false); + uri.AppendPath(thumbprint, true); + uri.AppendPath(")", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200404); @@ -10150,7 +11300,7 @@ internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? t return message; } - internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10164,6 +11314,10 @@ internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? 
t { uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); } + if (force != null) + { + uri.AppendQuery("force", force.Value, true); + } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); @@ -10338,7 +11492,7 @@ internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, int? t return message; } - internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10353,6 +11507,10 @@ internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int { uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); } + if (force != null) + { + uri.AppendQuery("force", force.Value, true); + } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); @@ -11025,6 +12183,94 @@ internal HttpMessage CreateRebootNodeRequest(string poolId, string nodeId, Reque return message; } + internal HttpMessage CreateStartNodeRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/start", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateReimageNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/reimage", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDeallocateNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/deallocate", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + internal HttpMessage CreateDisableNodeSchedulingRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); @@ -11543,6 +12789,25 @@ internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageReques return message; } + internal HttpMessage CreateGetCertificatesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs index eab2e5942414..c4f6c163cc57 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs @@ -13,13 +13,13 @@ namespace Azure.Compute.Batch /// Client options for BatchClient. public partial class BatchClientOptions : ClientOptions { - private const ServiceVersion LatestVersion = ServiceVersion.V2024_02_01_19_0; + private const ServiceVersion LatestVersion = ServiceVersion.V2024_07_01_20_0; /// The version of the service to use. public enum ServiceVersion { - /// Service version "2024-02-01.19.0". - V2024_02_01_19_0 = 1, + /// Service version "2024-07-01.20.0". + V2024_07_01_20_0 = 1, } internal string Version { get; } @@ -29,7 +29,7 @@ public BatchClientOptions(ServiceVersion version = LatestVersion) { Version = version switch { - ServiceVersion.V2024_02_01_19_0 => "2024-02-01.19.0", + ServiceVersion.V2024_07_01_20_0 => "2024-07-01.20.0", _ => throw new NotSupportedException() }; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs index b516fefc1292..3084acc36928 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs @@ -83,7 +83,7 @@ public BatchJob(BatchPoolInfo poolInfo) /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. - /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// Keeps track of any properties unknown to the library. internal BatchJob(string id, string displayName, bool? usesTaskDependencies, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobState? state, DateTimeOffset? stateTransitionTime, BatchJobState? previousState, DateTimeOffset? previousStateTransitionTime, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IReadOnlyList commonEnvironmentSettings, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? 
onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, IList metadata, BatchJobExecutionInfo executionInfo, BatchJobStatistics stats, IDictionary serializedAdditionalRawData) { @@ -171,7 +171,7 @@ internal BatchJob() public IList Metadata { get; } /// The execution information for the Job. public BatchJobExecutionInfo ExecutionInfo { get; } - /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. public BatchJobStatistics Stats { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs index 7f679bb2fb59..9ebea8b406d9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs @@ -71,7 +71,7 @@ public partial class BatchJobManagerTask /// Initializes a new instance of . /// A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. - /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// or is null. public BatchJobManagerTask(string id, string commandLine) { @@ -89,7 +89,7 @@ public BatchJobManagerTask(string id, string commandLine) /// Initializes a new instance of . /// A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. /// The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. 
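// Editor's note: the BatchClientOptions hunk above swaps the only supported service version from
// 2024-02-01.19.0 to 2024-07-01.20.0. A minimal sketch of pinning a client to the new version; the
// BatchClient(endpoint, credential, options) overload and the DefaultAzureCredential usage are assumed
// from the standard Azure SDK pattern and are not shown in this diff.
var options = new BatchClientOptions(BatchClientOptions.ServiceVersion.V2024_07_01_20_0);
var client = new BatchClient(
    new Uri("https://<account>.<region>.batch.azure.com"),   // placeholder endpoint
    new Azure.Identity.DefaultAzureCredential(),
    options);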
- /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. @@ -141,7 +141,7 @@ internal BatchJobManagerTask() public string Id { get; set; } /// The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; set; } - /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; set; } /// The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. public BatchTaskContainerSettings ContainerSettings { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs index 819bef2cfd5a..5afeec11834a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs @@ -36,6 +36,8 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("subnetId"u8); writer.WriteStringValue(SubnetId); + writer.WritePropertyName("skipWithdrawFromVNet"u8); + writer.WriteBooleanValue(SkipWithdrawFromVNet); if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -74,6 +76,7 @@ internal static BatchJobNetworkConfiguration DeserializeBatchJobNetworkConfigura return null; } string subnetId = default; + bool skipWithdrawFromVNet = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -83,13 +86,18 @@ internal static BatchJobNetworkConfiguration DeserializeBatchJobNetworkConfigura subnetId = property.Value.GetString(); continue; } + if (property.NameEquals("skipWithdrawFromVNet"u8)) + { + skipWithdrawFromVNet = property.Value.GetBoolean(); + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new BatchJobNetworkConfiguration(subnetId, serializedAdditionalRawData); + return new BatchJobNetworkConfiguration(subnetId, skipWithdrawFromVNet, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions 
options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs index d6697158bf5e..931e05614029 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs @@ -46,21 +46,25 @@ public partial class BatchJobNetworkConfiguration private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. 
For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. /// is null. - public BatchJobNetworkConfiguration(string subnetId) + public BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVNet) { Argument.AssertNotNull(subnetId, nameof(subnetId)); SubnetId = subnetId; + SkipWithdrawFromVNet = skipWithdrawFromVNet; } /// Initializes a new instance of . - /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. 
This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. /// Keeps track of any properties unknown to the library. - internal BatchJobNetworkConfiguration(string subnetId, IDictionary serializedAdditionalRawData) + internal BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVNet, IDictionary serializedAdditionalRawData) { SubnetId = subnetId; + SkipWithdrawFromVNet = skipWithdrawFromVNet; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -69,7 +73,9 @@ internal BatchJobNetworkConfiguration() { } - /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. 
The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. public string SubnetId { get; set; } + /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. + public bool SkipWithdrawFromVNet { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs index ce02002b17a0..cef539ebd5a2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs @@ -73,7 +73,7 @@ public partial class BatchJobPreparationTask private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// is null. public BatchJobPreparationTask(string commandLine) { @@ -86,7 +86,7 @@ public BatchJobPreparationTask(string commandLine) /// Initializes a new instance of . /// A string that uniquely identifies the Job Preparation Task within the Job. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). - /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. /// A list of environment variable settings for the Job Preparation Task. @@ -116,7 +116,7 @@ internal BatchJobPreparationTask() /// A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). 
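// Editor's note: as shown in the BatchJobNetworkConfiguration hunk above, the public constructor now
// requires the skipWithdrawFromVNet flag alongside the subnet id. A minimal sketch; the subnet id is a
// placeholder, and passing false keeps the default withdraw-on-job-end behavior described in the doc text.
var jobNetwork = new BatchJobNetworkConfiguration(
    subnetId: "/subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/virtualNetworks/{vnet}/subnets/{subnet}",
    skipWithdrawFromVNet: false);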
public string Id { get; set; } - /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; set; } /// The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. public BatchTaskContainerSettings ContainerSettings { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs index acbf28a11f7d..c9a63263aaf7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs @@ -63,7 +63,7 @@ public partial class BatchJobReleaseTask private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// is null. public BatchJobReleaseTask(string commandLine) { @@ -76,7 +76,7 @@ public BatchJobReleaseTask(string commandLine) /// Initializes a new instance of . /// A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). - /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. /// A list of environment variable settings for the Job Release Task. @@ -104,7 +104,7 @@ internal BatchJobReleaseTask() /// A string that uniquely identifies the Job Release Task within the Job. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). public string Id { get; set; } - /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; set; } /// The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
public BatchTaskContainerSettings ContainerSettings { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs index 9da03e653a47..4c44d8f1271c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs @@ -47,19 +47,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteNumberValue(ReadIOps); + writer.WriteStringValue(ReadIOps.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteNumberValue(WriteIOps); + writer.WriteStringValue(WriteIOps.ToString()); writer.WritePropertyName("readIOGiB"u8); writer.WriteNumberValue(ReadIOGiB); writer.WritePropertyName("writeIOGiB"u8); writer.WriteNumberValue(WriteIOGiB); writer.WritePropertyName("numSucceededTasks"u8); - writer.WriteNumberValue(NumSucceededTasks); + writer.WriteStringValue(NumSucceededTasks.ToString()); writer.WritePropertyName("numFailedTasks"u8); - writer.WriteNumberValue(NumFailedTasks); + writer.WriteStringValue(NumFailedTasks.ToString()); writer.WritePropertyName("numTaskRetries"u8); - writer.WriteNumberValue(NumTaskRetries); + writer.WriteStringValue(NumTaskRetries.ToString()); writer.WritePropertyName("waitTime"u8); writer.WriteStringValue(WaitTime, "P"); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -149,12 +149,12 @@ internal static BatchJobScheduleStatistics DeserializeBatchJobScheduleStatistics } if (property.NameEquals("readIOps"u8)) { - readIOps = property.Value.GetInt64(); + readIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("writeIOps"u8)) { - writeIOps = property.Value.GetInt64(); + writeIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("readIOGiB"u8)) @@ -169,17 +169,17 @@ internal static BatchJobScheduleStatistics DeserializeBatchJobScheduleStatistics } if (property.NameEquals("numSucceededTasks"u8)) { - numSucceededTasks = property.Value.GetInt64(); + numSucceededTasks = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("numFailedTasks"u8)) { - numFailedTasks = property.Value.GetInt64(); + numFailedTasks = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("numTaskRetries"u8)) { - numTaskRetries = property.Value.GetInt64(); + numTaskRetries = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("waitTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs index 3fd616c5a571..6c438981a83a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs @@ -47,19 +47,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteNumberValue(ReadIOps); + writer.WriteStringValue(ReadIOps.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteNumberValue(WriteIOps); + 
writer.WriteStringValue(WriteIOps.ToString()); writer.WritePropertyName("readIOGiB"u8); writer.WriteNumberValue(ReadIOGiB); writer.WritePropertyName("writeIOGiB"u8); writer.WriteNumberValue(WriteIOGiB); writer.WritePropertyName("numSucceededTasks"u8); - writer.WriteNumberValue(NumSucceededTasks); + writer.WriteStringValue(NumSucceededTasks.ToString()); writer.WritePropertyName("numFailedTasks"u8); - writer.WriteNumberValue(NumFailedTasks); + writer.WriteStringValue(NumFailedTasks.ToString()); writer.WritePropertyName("numTaskRetries"u8); - writer.WriteNumberValue(NumTaskRetries); + writer.WriteStringValue(NumTaskRetries.ToString()); writer.WritePropertyName("waitTime"u8); writer.WriteStringValue(WaitTime, "P"); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -149,12 +149,12 @@ internal static BatchJobStatistics DeserializeBatchJobStatistics(JsonElement ele } if (property.NameEquals("readIOps"u8)) { - readIOps = property.Value.GetInt64(); + readIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("writeIOps"u8)) { - writeIOps = property.Value.GetInt64(); + writeIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("readIOGiB"u8)) @@ -169,17 +169,17 @@ internal static BatchJobStatistics DeserializeBatchJobStatistics(JsonElement ele } if (property.NameEquals("numSucceededTasks"u8)) { - numSucceededTasks = property.Value.GetInt64(); + numSucceededTasks = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("numFailedTasks"u8)) { - numFailedTasks = property.Value.GetInt64(); + numFailedTasks = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("numTaskRetries"u8)) { - numTaskRetries = property.Value.GetInt64(); + numTaskRetries = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("waitTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs index ba6caea0e8ae..7a9f484ab1f8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs @@ -74,6 +74,11 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -118,6 +123,7 @@ internal static BatchJobUpdateContent DeserializeBatchJobUpdateContent(JsonEleme BatchPoolInfo poolInfo = default; OnAllBatchTasksComplete? 
onAllTasksComplete = default; IList metadata = default; + BatchJobNetworkConfiguration networkConfiguration = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -190,6 +196,15 @@ internal static BatchJobUpdateContent DeserializeBatchJobUpdateContent(JsonEleme metadata = array; continue; } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = BatchJobNetworkConfiguration.DeserializeBatchJobNetworkConfiguration(property.Value, options); + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); @@ -204,6 +219,7 @@ internal static BatchJobUpdateContent DeserializeBatchJobUpdateContent(JsonEleme poolInfo, onAllTasksComplete, metadata ?? new ChangeTrackingList(), + networkConfiguration, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs index 5776641d88ea..0cbcd028e9a1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs @@ -59,8 +59,9 @@ public BatchJobUpdateContent() /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. + /// The network configuration for the Job. /// Keeps track of any properties unknown to the library. - internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, IList metadata, IDictionary serializedAdditionalRawData) + internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, IList metadata, BatchJobNetworkConfiguration networkConfiguration, IDictionary serializedAdditionalRawData) { Priority = priority; AllowTaskPreemption = allowTaskPreemption; @@ -69,6 +70,7 @@ internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? 
ma PoolInfo = poolInfo; OnAllTasksComplete = onAllTasksComplete; Metadata = metadata; + NetworkConfiguration = networkConfiguration; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -86,5 +88,7 @@ internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? ma public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. public IList Metadata { get; } + /// The network configuration for the Job. + public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs index e866e76adce6..635035a86ee0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs @@ -124,6 +124,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTaskInfo"u8); writer.WriteObjectValue(StartTaskInfo, options); } + if (Optional.IsCollectionDefined(CertificateReferences)) + { + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (Optional.IsCollectionDefined(Errors)) { writer.WritePropertyName("errors"u8); @@ -208,6 +218,7 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW IReadOnlyList recentTasks = default; BatchStartTask startTask = default; BatchStartTaskInfo startTaskInfo = default; + IReadOnlyList certificateReferences = default; IReadOnlyList errors = default; bool? isDedicated = default; BatchNodeEndpointConfiguration endpointConfiguration = default; @@ -355,6 +366,20 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW startTaskInfo = BatchStartTaskInfo.DeserializeBatchStartTaskInfo(property.Value, options); continue; } + if (property.NameEquals("certificateReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); + } + certificateReferences = array; + continue; + } if (property.NameEquals("errors"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -429,6 +454,7 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW recentTasks ?? new ChangeTrackingList(), startTask, startTaskInfo, + certificateReferences ?? new ChangeTrackingList(), errors ?? new ChangeTrackingList(), isDedicated, endpointConfiguration, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs index a1e4ac04664e..0140f6c364c4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs @@ -49,6 +49,7 @@ public partial class BatchNode internal BatchNode() { RecentTasks = new ChangeTrackingList(); + CertificateReferences = new ChangeTrackingList(); Errors = new ChangeTrackingList(); } @@ -62,7 +63,7 @@ internal BatchNode() /// The time at which this Compute Node was allocated to the Pool. 
This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. - /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. /// The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. /// The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. @@ -70,13 +71,19 @@ internal BatchNode() /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. /// The Task specified to run on the Compute Node as it joins the Pool. /// Runtime information about the execution of the StartTask on the Compute Node. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of errors that are currently being encountered by the Compute Node. /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. /// The endpoint configuration for the Compute Node. /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. /// Keeps track of any properties unknown to the library. - internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? 
stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, string ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) + internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, string ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList certificateReferences, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) { Id = id; Url = url; @@ -95,6 +102,7 @@ internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState RecentTasks = recentTasks; StartTask = startTask; StartTaskInfo = startTaskInfo; + CertificateReferences = certificateReferences; Errors = errors; IsDedicated = isDedicated; EndpointConfiguration = endpointConfiguration; @@ -121,7 +129,7 @@ internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState public string IpAddress { get; } /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. public string AffinityId { get; } - /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). public string VmSize { get; } /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. public int? TotalTasksRun { get; } @@ -137,6 +145,13 @@ internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState public BatchStartTask StartTask { get; } /// Runtime information about the execution of the StartTask on the Compute Node. public BatchStartTaskInfo StartTaskInfo { get; } + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. 
+ /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IReadOnlyList CertificateReferences { get; } /// The list of errors that are currently being encountered by the Compute Node. public IReadOnlyList Errors { get; } /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs index d1a64e538f90..221bd5ae525f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs @@ -60,6 +60,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteNumberValue(Unusable); writer.WritePropertyName("waitingForStartTask"u8); writer.WriteNumberValue(WaitingForStartTask); + writer.WritePropertyName("deallocated"u8); + writer.WriteNumberValue(Deallocated); + writer.WritePropertyName("deallocating"u8); + writer.WriteNumberValue(Deallocating); writer.WritePropertyName("total"u8); writer.WriteNumberValue(Total); writer.WritePropertyName("upgradingOS"u8); @@ -114,6 +118,8 @@ internal static BatchNodeCounts DeserializeBatchNodeCounts(JsonElement element, int unknown = default; int unusable = default; int waitingForStartTask = default; + int deallocated = default; + int deallocating = default; int total = default; int upgradingOS = default; IDictionary serializedAdditionalRawData = default; @@ -185,6 +191,16 @@ internal static BatchNodeCounts DeserializeBatchNodeCounts(JsonElement element, waitingForStartTask = property.Value.GetInt32(); continue; } + if (property.NameEquals("deallocated"u8)) + { + deallocated = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("deallocating"u8)) + { + deallocating = property.Value.GetInt32(); + continue; + } if (property.NameEquals("total"u8)) { total = property.Value.GetInt32(); @@ -215,6 +231,8 @@ internal static BatchNodeCounts DeserializeBatchNodeCounts(JsonElement element, unknown, unusable, waitingForStartTask, + deallocated, + deallocating, total, upgradingOS, serializedAdditionalRawData); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs index 56ccd9fa2373..04e5b38c071d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs @@ -59,9 +59,11 @@ public partial class BatchNodeCounts /// The number of Compute Nodes in the unknown state. /// The number of Compute Nodes in the unusable state. /// The number of Compute Nodes in the waitingForStartTask state. + /// The number of Compute Nodes in the deallocated state. + /// The number of Compute Nodes in the deallocating state. /// The total number of Compute Nodes. /// The number of Compute Nodes in the upgradingOS state. 
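The deallocated/deallocating counters added in the BatchNodeCounts hunks above surface through the pool node-count listing. Below is a minimal sketch of reading them; the BatchClient construction, the GetPoolNodeCountsAsync method, and the BatchPoolNodeCounts.PoolId/Dedicated shape are assumptions based on how the rest of this SDK is laid out, not something shown in this diff.

using System;
using System.Threading.Tasks;
using Azure.Compute.Batch;
using Azure.Identity;

class NodeCountSketch
{
    static async Task Main()
    {
        // Assumed client construction: account endpoint plus an Entra ID credential (Azure.Identity).
        var client = new BatchClient(new Uri("https://<account>.<region>.batch.azure.com"), new DefaultAzureCredential());

        // GetPoolNodeCountsAsync is assumed to return one BatchPoolNodeCounts entry per pool.
        await foreach (BatchPoolNodeCounts counts in client.GetPoolNodeCountsAsync())
        {
            BatchNodeCounts dedicated = counts.Dedicated;
            // Deallocating/Deallocated are the two counters introduced by this change.
            Console.WriteLine($"{counts.PoolId}: deallocating={dedicated.Deallocating}, deallocated={dedicated.Deallocated}, total={dedicated.Total}");
        }
    }
}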
- internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int total, int upgradingOs) + internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int deallocated, int deallocating, int total, int upgradingOs) { Creating = creating; Idle = idle; @@ -76,6 +78,8 @@ internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int Unknown = unknown; Unusable = unusable; WaitingForStartTask = waitingForStartTask; + Deallocated = deallocated; + Deallocating = deallocating; Total = total; UpgradingOs = upgradingOs; } @@ -94,10 +98,12 @@ internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int /// The number of Compute Nodes in the unknown state. /// The number of Compute Nodes in the unusable state. /// The number of Compute Nodes in the waitingForStartTask state. + /// The number of Compute Nodes in the deallocated state. + /// The number of Compute Nodes in the deallocating state. /// The total number of Compute Nodes. /// The number of Compute Nodes in the upgradingOS state. /// Keeps track of any properties unknown to the library. - internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int total, int upgradingOs, IDictionary serializedAdditionalRawData) + internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int deallocated, int deallocating, int total, int upgradingOs, IDictionary serializedAdditionalRawData) { Creating = creating; Idle = idle; @@ -112,6 +118,8 @@ internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int Unknown = unknown; Unusable = unusable; WaitingForStartTask = waitingForStartTask; + Deallocated = deallocated; + Deallocating = deallocating; Total = total; UpgradingOs = upgradingOs; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -148,6 +156,10 @@ internal BatchNodeCounts() public int Unusable { get; } /// The number of Compute Nodes in the waitingForStartTask state. public int WaitingForStartTask { get; } + /// The number of Compute Nodes in the deallocated state. + public int Deallocated { get; } + /// The number of Compute Nodes in the deallocating state. + public int Deallocating { get; } /// The total number of Compute Nodes. public int Total { get; } /// The number of Compute Nodes in the upgradingOS state. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs new file mode 100644 index 000000000000..6c13b3728b38 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeDeallocateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(NodeDeallocateOption)) + { + writer.WritePropertyName("nodeDeallocateOption"u8); + writer.WriteStringValue(NodeDeallocateOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + BatchNodeDeallocateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeDeallocateContent(document.RootElement, options); + } + + internal static BatchNodeDeallocateContent DeserializeBatchNodeDeallocateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodeDeallocateOption? nodeDeallocateOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeDeallocateOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeDeallocateOption = new BatchNodeDeallocateOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeDeallocateContent(nodeDeallocateOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeDeallocateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchNodeDeallocateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeDeallocateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchNodeDeallocateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs new file mode 100644 index 000000000000..c14a7b03f81e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Options for deallocating a Compute Node. + public partial class BatchNodeDeallocateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeDeallocateContent() + { + } + + /// Initializes a new instance of . + /// When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchNodeDeallocateContent(BatchNodeDeallocateOption? 
nodeDeallocateOption, IDictionary serializedAdditionalRawData) + { + NodeDeallocateOption = nodeDeallocateOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue. + public BatchNodeDeallocateOption? NodeDeallocateOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs new file mode 100644 index 000000000000..b4c883a187e5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeDeallocateOption enums. + public readonly partial struct BatchNodeDeallocateOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeDeallocateOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string TaskCompletionValue = "taskcompletion"; + private const string RetainedDataValue = "retaineddata"; + + /// Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Deallocate the Compute Node as soon as Tasks have been terminated. + public static BatchNodeDeallocateOption Requeue { get; } = new BatchNodeDeallocateOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Deallocate the Compute Node as soon as Tasks have been terminated. + public static BatchNodeDeallocateOption Terminate { get; } = new BatchNodeDeallocateOption(TerminateValue); + /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Deallocate the Compute Node when all Tasks have completed. + public static BatchNodeDeallocateOption TaskCompletion { get; } = new BatchNodeDeallocateOption(TaskCompletionValue); + /// Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Deallocate the Compute Node when all Task retention periods have expired. + public static BatchNodeDeallocateOption RetainedData { get; } = new BatchNodeDeallocateOption(RetainedDataValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeDeallocateOption left, BatchNodeDeallocateOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeDeallocateOption left, BatchNodeDeallocateOption right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator BatchNodeDeallocateOption(string value) => new BatchNodeDeallocateOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeDeallocateOption other && Equals(other); + /// + public bool Equals(BatchNodeDeallocateOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs new file mode 100644 index 000000000000..368c54b113bb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeReimageContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(NodeReimageOption)) + { + writer.WritePropertyName("nodeReimageOption"u8); + writer.WriteStringValue(NodeReimageOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + BatchNodeReimageContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeReimageContent(document.RootElement, options); + } + + internal static BatchNodeReimageContent DeserializeBatchNodeReimageContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodeReimageOption? nodeReimageOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeReimageOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeReimageOption = new BatchNodeReimageOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeReimageContent(nodeReimageOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeReimageContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchNodeReimageContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeReimageContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeBatchNodeReimageContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs new file mode 100644 index 000000000000..8222990b7344 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for reimaging an Azure Batch Compute Node. + public partial class BatchNodeReimageContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeReimageContent() + { + } + + /// Initializes a new instance of . + /// When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchNodeReimageContent(BatchNodeReimageOption? nodeReimageOption, IDictionary serializedAdditionalRawData) + { + NodeReimageOption = nodeReimageOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. + public BatchNodeReimageOption? NodeReimageOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs new file mode 100644 index 000000000000..1ceebbf46262 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeReimageOption enums. + public readonly partial struct BatchNodeReimageOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeReimageOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string TaskCompletionValue = "taskcompletion"; + private const string RetainedDataValue = "retaineddata"; + + /// Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Reimage the Compute Node as soon as Tasks have been terminated. + public static BatchNodeReimageOption Requeue { get; } = new BatchNodeReimageOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Reimage the Compute Node as soon as Tasks have been terminated. + public static BatchNodeReimageOption Terminate { get; } = new BatchNodeReimageOption(TerminateValue); + /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Reimage the Compute Node when all Tasks have completed. 
+ public static BatchNodeReimageOption TaskCompletion { get; } = new BatchNodeReimageOption(TaskCompletionValue); + /// Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Reimage the Compute Node when all Task retention periods have expired. + public static BatchNodeReimageOption RetainedData { get; } = new BatchNodeReimageOption(RetainedDataValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeReimageOption left, BatchNodeReimageOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeReimageOption left, BatchNodeReimageOption right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchNodeReimageOption(string value) => new BatchNodeReimageOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeReimageOption other && Equals(other); + /// + public bool Equals(BatchNodeReimageOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs index b986f5ec1350..71bfaaba250f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs @@ -36,6 +36,8 @@ public BatchNodeState(string value) private const string OfflineValue = "offline"; private const string PreemptedValue = "preempted"; private const string UpgradingOSValue = "upgradingos"; + private const string DeallocatedValue = "deallocated"; + private const string DeallocatingValue = "deallocating"; /// The Compute Node is not currently running a Task. public static BatchNodeState Idle { get; } = new BatchNodeState(IdleValue); @@ -65,6 +67,10 @@ public BatchNodeState(string value) public static BatchNodeState Preempted { get; } = new BatchNodeState(PreemptedValue); /// The Compute Node is undergoing an OS upgrade operation. public static BatchNodeState UpgradingOS { get; } = new BatchNodeState(UpgradingOSValue); + /// The Compute Node is deallocated. + public static BatchNodeState Deallocated { get; } = new BatchNodeState(DeallocatedValue); + /// The Compute Node is deallocating. + public static BatchNodeState Deallocating { get; } = new BatchNodeState(DeallocatingValue); /// Determines if two values are the same. public static bool operator ==(BatchNodeState left, BatchNodeState right) => left.Equals(right); /// Determines if two values are not the same. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs index 0f9fbcc2b158..e3d6007ed5fd 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs @@ -59,7 +59,7 @@ public BatchNodeUserCreateContent(string name) /// The user name of the Account. /// Whether the Account should be an administrator on the Compute Node. The default value is false. /// The time at which the Account should expire. 
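The new BatchNodeDeallocateContent/BatchNodeDeallocateOption models and the Deallocated/Deallocating node states combine into a stop-deallocate workflow. A minimal sketch follows; the DeallocateNodeAsync and GetNodeAsync client methods and the pool/node IDs are assumptions, only the model and enum names come from this diff.

using System;
using System.Threading.Tasks;
using Azure.Compute.Batch;
using Azure.Identity;

class DeallocateSketch
{
    static async Task Main()
    {
        var client = new BatchClient(new Uri("https://<account>.<region>.batch.azure.com"), new DefaultAzureCredential());

        // Let currently running Tasks finish, then deallocate the Compute Node.
        var content = new BatchNodeDeallocateContent
        {
            NodeDeallocateOption = BatchNodeDeallocateOption.TaskCompletion,
        };

        // DeallocateNodeAsync is assumed to be the client operation that accepts this body.
        await client.DeallocateNodeAsync("<poolId>", "<nodeId>", content);

        // Poll until the node reports the new Deallocated state.
        BatchNode node;
        do
        {
            await Task.Delay(TimeSpan.FromSeconds(30));
            node = await client.GetNodeAsync("<poolId>", "<nodeId>");
        } while (node.State != BatchNodeState.Deallocated);
    }
}

BatchNodeReimageContent/BatchNodeReimageOption follow the same pattern with a reimage operation instead of a deallocate one.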
If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Keeps track of any properties unknown to the library. internal BatchNodeUserCreateContent(string name, bool? isAdmin, DateTimeOffset? expiryTime, string password, string sshPublicKey, IDictionary serializedAdditionalRawData) @@ -83,7 +83,7 @@ internal BatchNodeUserCreateContent() public bool? IsAdmin { get; set; } /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. public DateTimeOffset? ExpiryTime { get; set; } - /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. public string Password { get; set; } /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). public string SshPublicKey { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs index 8ebac642c3d5..9741f08b6208 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs @@ -51,7 +51,7 @@ public BatchNodeUserUpdateContent() } /// Initializes a new instance of . - /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. + /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. /// The time at which the Account should expire. 
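The BatchNodeUserCreateContent doc changes above describe when Password versus SshPublicKey applies. A short sketch of creating a remote-login user on a Linux node follows; the required name constructor argument is taken from this diff, while the CreateNodeUserAsync method, client construction, and placeholder values are assumptions.

using System;
using System.Threading.Tasks;
using Azure.Compute.Batch;
using Azure.Identity;

class NodeUserSketch
{
    static async Task Main()
    {
        var client = new BatchClient(new Uri("https://<account>.<region>.batch.azure.com"), new DefaultAzureCredential());

        // For a Linux Compute Node an SSH public key is sufficient; a Windows node would require Password instead.
        var user = new BatchNodeUserCreateContent("remoteuser")
        {
            IsAdmin = false,
            ExpiryTime = DateTimeOffset.UtcNow.AddDays(1),
            SshPublicKey = "<base64-openssh-public-key>",
        };

        // CreateNodeUserAsync is assumed to be the operation that accepts this body.
        await client.CreateNodeUserAsync("<poolId>", "<nodeId>", user);
    }
}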
If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. /// Keeps track of any properties unknown to the library. @@ -63,7 +63,7 @@ internal BatchNodeUserUpdateContent(string password, DateTimeOffset? expiryTime, _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. + /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. public string Password { get; set; } /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. public DateTimeOffset? ExpiryTime { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs index 38003a38b8c5..99646a49b279 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs @@ -175,6 +175,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } + if (options.Format != "W" && Optional.IsCollectionDefined(CertificateReferences)) + { + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (options.Format != "W" && Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -313,6 +323,7 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW bool? enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; + IReadOnlyList certificateReferences = default; IReadOnlyList applicationPackageReferences = default; int? 
taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; @@ -548,6 +559,20 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } + if (property.NameEquals("certificateReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); + } + certificateReferences = array; + continue; + } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -700,6 +725,7 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW enableInterNodeCommunication, networkConfiguration, startTask, + certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), taskSlotsPerNode, taskSchedulingPolicy, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs index 4cbd3bee1478..db28d05648db 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs @@ -50,6 +50,7 @@ internal BatchPool() { ResizeErrors = new ChangeTrackingList(); ResourceTags = new ChangeTrackingDictionary(); + CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -67,7 +68,7 @@ internal BatchPool() /// The time at which the Pool entered its current state. /// Whether the Pool is resizing. /// The time at which the Pool entered its current allocation state. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. @@ -83,19 +84,25 @@ internal BatchPool() /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. 
+ /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. - /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPool(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? 
targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics stats, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode? currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPool(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList certificateReferences, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics stats, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode? currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -123,6 +130,7 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; + CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; @@ -157,7 +165,7 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT public AllocationState? AllocationState { get; } /// The time at which the Pool entered its current allocation state. public DateTimeOffset? AllocationStateTransitionTime { get; } - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). public string VmSize { get; } /// The virtual machine configuration for the Pool. This property must be specified. 
public VirtualMachineConfiguration VirtualMachineConfiguration { get; } @@ -189,6 +197,13 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT public NetworkConfiguration NetworkConfiguration { get; } /// A Task specified to run on each Compute Node as it joins the Pool. public BatchStartTask StartTask { get; } + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IReadOnlyList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IReadOnlyList ApplicationPackageReferences { get; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. @@ -199,7 +214,7 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT public IReadOnlyList UserAccounts { get; } /// A list of name-value pairs associated with the Pool as metadata. public IReadOnlyList Metadata { get; } - /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. public BatchPoolStatistics Stats { get; } /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. 
public IReadOnlyList MountConfiguration { get; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs index 20915f7e4561..3252b216865b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs @@ -104,6 +104,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } + if (Optional.IsCollectionDefined(CertificateReferences)) + { + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -215,6 +225,7 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle bool? enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; + IList certificateReferences = default; IList applicationPackageReferences = default; int? taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; @@ -342,6 +353,20 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } + if (property.NameEquals("certificateReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); + } + certificateReferences = array; + continue; + } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -455,6 +480,7 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle enableInterNodeCommunication, networkConfiguration, startTask, + certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), taskSlotsPerNode, taskSchedulingPolicy, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs index 78653a2849d3..d8fcbc31786e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs @@ -47,7 +47,7 @@ public partial class BatchPoolCreateContent /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. 
For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// or is null. public BatchPoolCreateContent(string id, string vmSize) { @@ -57,6 +57,7 @@ public BatchPoolCreateContent(string id, string vmSize) Id = id; VmSize = vmSize; ResourceTags = new ChangeTrackingDictionary(); + CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -66,18 +67,24 @@ public BatchPoolCreateContent(string id, string vmSize) /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. 
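The BatchPoolCreateContent hunks above require id and vmSize in the constructor and document the auto-scale formula behavior. A minimal pool-creation sketch follows; the CreatePoolAsync method, the VirtualMachineConfiguration/ImageReference construction, and the image and node-agent SKU values are assumptions for illustration only.

using System;
using System.Threading.Tasks;
using Azure.Compute.Batch;
using Azure.Identity;

class CreatePoolSketch
{
    static async Task Main()
    {
        var client = new BatchClient(new Uri("https://<account>.<region>.batch.azure.com"), new DefaultAzureCredential());

        // Id and vmSize are the two required constructor arguments shown in this diff.
        var pool = new BatchPoolCreateContent("autoscale-pool", "standard_d2s_v3")
        {
            // Image and node agent SKU values are illustrative; the VirtualMachineConfiguration shape is assumed.
            VirtualMachineConfiguration = new VirtualMachineConfiguration(
                new ImageReference { Publisher = "canonical", Offer = "ubuntu-24_04-lts", Sku = "server", Version = "latest" },
                "batch.node.ubuntu 24.04"),
            // With enableAutoScale set to true, targetDedicatedNodes/targetLowPriorityNodes must not be specified.
            EnableAutoScale = true,
            AutoScaleFormula = "$TargetDedicatedNodes = 2;",
            AutoScaleEvaluationInterval = TimeSpan.FromMinutes(15),
        };

        // CreatePoolAsync is assumed to be the operation that accepts this body.
        await client.CreatePoolAsync(pool);
    }
}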
If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. - /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. 
+ /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. @@ -87,7 +94,7 @@ public BatchPoolCreateContent(string id, string vmSize) /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPoolCreateContent(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolCreateContent(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -103,6 +110,7 @@ internal BatchPoolCreateContent(string id, string displayName, string vmSize, Vi EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; + CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; TaskSlotsPerNode = taskSlotsPerNode; TaskSchedulingPolicy = taskSchedulingPolicy; @@ -123,7 +131,7 @@ internal BatchPoolCreateContent() public string Id { get; } /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; set; } - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). public string VmSize { get; } /// The virtual machine configuration for the Pool. This property must be specified. public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } @@ -137,7 +145,7 @@ internal BatchPoolCreateContent() public int? TargetLowPriorityNodes { get; set; } /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. public bool? EnableAutoScale { get; set; } - /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. 
For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). public string AutoScaleFormula { get; set; } /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). public TimeSpan? AutoScaleEvaluationInterval { get; set; } @@ -147,6 +155,13 @@ internal BatchPoolCreateContent() public NetworkConfiguration NetworkConfiguration { get; set; } /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. public BatchStartTask StartTask { get; set; } + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IList ApplicationPackageReferences { get; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs index 13446bd47d3c..b3b15b6404d6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs @@ -51,7 +51,7 @@ public BatchPoolEnableAutoScaleContent() } /// Initializes a new instance of . - /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). 
+ /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. /// Keeps track of any properties unknown to the library. internal BatchPoolEnableAutoScaleContent(string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, IDictionary serializedAdditionalRawData) @@ -61,7 +61,7 @@ internal BatchPoolEnableAutoScaleContent(string autoScaleFormula, TimeSpan? auto _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). public string AutoScaleFormula { get; set; } /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. public TimeSpan?
AutoScaleEvaluationInterval { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs index b89f8e7df1cd..50097db45eac 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs @@ -46,7 +46,7 @@ public partial class BatchPoolEvaluateAutoScaleContent private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// is null. public BatchPoolEvaluateAutoScaleContent(string autoScaleFormula) { @@ -56,7 +56,7 @@ public BatchPoolEvaluateAutoScaleContent(string autoScaleFormula) } /// Initializes a new instance of . - /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// Keeps track of any properties unknown to the library. internal BatchPoolEvaluateAutoScaleContent(string autoScaleFormula, IDictionary serializedAdditionalRawData) { @@ -69,7 +69,7 @@ internal BatchPoolEvaluateAutoScaleContent() { } - /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). 
public string AutoScaleFormula { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs index 7f8bf2a10295..6bb3b647290d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs @@ -29,6 +29,13 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); writer.WritePropertyName("applicationPackageReferences"u8); writer.WriteStartArray(); foreach (var item in ApplicationPackageReferences) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs index 15a93a07a92a..0db0b9f350b2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs @@ -47,27 +47,46 @@ public partial class BatchPoolReplaceContent private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . + /// + /// This list replaces any existing Certificate references configured on the Pool. + /// If you specify an empty collection, any existing Certificate references are removed from the Pool. + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. - /// or is null. - public BatchPoolReplaceContent(IEnumerable applicationPackageReferences, IEnumerable metadata) + /// , or is null. 
+ public BatchPoolReplaceContent(IEnumerable certificateReferences, IEnumerable applicationPackageReferences, IEnumerable metadata) { + Argument.AssertNotNull(certificateReferences, nameof(certificateReferences)); Argument.AssertNotNull(applicationPackageReferences, nameof(applicationPackageReferences)); Argument.AssertNotNull(metadata, nameof(metadata)); + CertificateReferences = certificateReferences.ToList(); ApplicationPackageReferences = applicationPackageReferences.ToList(); Metadata = metadata.ToList(); } /// Initializes a new instance of . /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. + /// + /// This list replaces any existing Certificate references configured on the Pool. + /// If you specify an empty collection, any existing Certificate references are removed from the Pool. + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. /// Keeps track of any properties unknown to the library. - internal BatchPoolReplaceContent(BatchStartTask startTask, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + internal BatchPoolReplaceContent(BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) { StartTask = startTask; + CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; Metadata = metadata; TargetNodeCommunicationMode = targetNodeCommunicationMode; @@ -81,6 +100,15 @@ internal BatchPoolReplaceContent() /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. public BatchStartTask StartTask { get; set; } + /// + /// This list replaces any existing Certificate references configured on the Pool. + /// If you specify an empty collection, any existing Certificate references are removed from the Pool. + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IList CertificateReferences { get; } /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. public IList ApplicationPackageReferences { get; } /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. 
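Illustrative usage (not part of the generated sources): a minimal sketch of the updated BatchPoolReplaceContent constructor, which now takes the certificate reference list as its first parameter. The collection element types (BatchCertificateReference, BatchApplicationPackageReference, MetadataItem) and the client method name in the trailing comment are assumptions for illustration, not taken from this patch.

    // Sketch under the assumptions above: passing an empty certificateReferences
    // collection removes any existing (deprecated) Certificate references from the
    // Pool, as described in the doc comments in this file.
    using System.Collections.Generic;
    using Azure.Compute.Batch;

    var replaceContent = new BatchPoolReplaceContent(
        certificateReferences: new List<BatchCertificateReference>(),              // empty => clear deprecated references
        applicationPackageReferences: new List<BatchApplicationPackageReference>(),
        metadata: new List<MetadataItem>());

    // Assumed client call (not shown in this patch):
    // await batchClient.ReplacePoolPropertiesAsync("myPoolId", replaceContent);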
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs index 85a112746f52..560ae39372e5 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs @@ -49,9 +49,9 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("peakDiskGiB"u8); writer.WriteNumberValue(PeakDiskGiB); writer.WritePropertyName("diskReadIOps"u8); - writer.WriteNumberValue(DiskReadIOps); + writer.WriteStringValue(DiskReadIOps.ToString()); writer.WritePropertyName("diskWriteIOps"u8); - writer.WriteNumberValue(DiskWriteIOps); + writer.WriteStringValue(DiskWriteIOps.ToString()); writer.WritePropertyName("diskReadGiB"u8); writer.WriteNumberValue(DiskReadGiB); writer.WritePropertyName("diskWriteGiB"u8); @@ -151,12 +151,12 @@ internal static BatchPoolResourceStatistics DeserializeBatchPoolResourceStatisti } if (property.NameEquals("diskReadIOps"u8)) { - diskReadIOps = property.Value.GetInt64(); + diskReadIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("diskWriteIOps"u8)) { - diskWriteIOps = property.Value.GetInt64(); + diskWriteIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("diskReadGiB"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs index b13e093d5682..31aac819b0ac 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs @@ -106,6 +106,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } + if (Optional.IsCollectionDefined(CertificateReferences)) + { + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -208,6 +218,7 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle bool? 
enableInterNodeCommunication = default; NetworkConfiguration networkConfiguration = default; BatchStartTask startTask = default; + IList certificateReferences = default; IList applicationPackageReferences = default; IList userAccounts = default; IList metadata = default; @@ -337,6 +348,20 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } + if (property.NameEquals("certificateReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); + } + certificateReferences = array; + continue; + } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -433,6 +458,7 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle enableInterNodeCommunication, networkConfiguration, startTask, + certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), userAccounts ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs index 758fe6016c02..00d8012ecbf0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs @@ -46,13 +46,14 @@ public partial class BatchPoolSpecification private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// is null. public BatchPoolSpecification(string vmSize) { Argument.AssertNotNull(vmSize, nameof(vmSize)); VmSize = vmSize; + CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); @@ -61,8 +62,8 @@ public BatchPoolSpecification(string vmSize) /// Initializes a new instance of . /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). - /// The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
+ /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The virtual machine configuration for the Pool. This property must be specified. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). @@ -75,6 +76,11 @@ public BatchPoolSpecification(string vmSize) /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. + /// Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. @@ -82,7 +88,7 @@ public BatchPoolSpecification(string vmSize) /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. 
- internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { DisplayName = displayName; VmSize = vmSize; @@ -99,6 +105,7 @@ internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachin EnableInterNodeCommunication = enableInterNodeCommunication; NetworkConfiguration = networkConfiguration; StartTask = startTask; + CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; UserAccounts = userAccounts; Metadata = metadata; @@ -115,9 +122,9 @@ internal BatchPoolSpecification() /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; set; } - /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). public string VmSize { get; set; } - /// The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The virtual machine configuration for the Pool. This property must be specified. public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. public int? 
TaskSlotsPerNode { get; set; } @@ -143,6 +150,12 @@ internal BatchPoolSpecification() public NetworkConfiguration NetworkConfiguration { get; set; } /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. public BatchStartTask StartTask { get; set; } + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. + /// Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IList CertificateReferences { get; } /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. public IList ApplicationPackageReferences { get; } /// The list of user Accounts to be created on each Compute Node in the Pool. 
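Illustrative usage (not part of the generated sources): a minimal sketch of how the new get-only CertificateReferences collection on BatchPoolSpecification is populated, since the constructor initializes the list and exposes no setter. The BatchCertificateReference constructor arguments (thumbprint and thumbprint algorithm) are assumptions for illustration.

    // Sketch under the assumptions above; the certificate reference itself is
    // deprecated per the doc comments and is shown only to illustrate the pattern.
    using Azure.Compute.Batch;

    var poolSpec = new BatchPoolSpecification(vmSize: "STANDARD_D2s_v3")
    {
        TaskSlotsPerNode = 4,   // settable scalar property on the model
    };
    poolSpec.CertificateReferences.Add(
        new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1"));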
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs index d8e67f439237..7863b2ab66b4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs @@ -34,11 +34,36 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support writing '{format}' format."); } + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (Optional.IsDefined(VmSize)) + { + writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + } + if (Optional.IsDefined(EnableInterNodeCommunication)) + { + writer.WritePropertyName("enableInterNodeCommunication"u8); + writer.WriteBooleanValue(EnableInterNodeCommunication.Value); + } if (Optional.IsDefined(StartTask)) { writer.WritePropertyName("startTask"u8); writer.WriteObjectValue(StartTask, options); } + if (Optional.IsCollectionDefined(CertificateReferences)) + { + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + foreach (var item in CertificateReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (Optional.IsCollectionDefined(ApplicationPackageReferences)) { writer.WritePropertyName("applicationPackageReferences"u8); @@ -59,11 +84,67 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } + if (Optional.IsDefined(VirtualMachineConfiguration)) + { + writer.WritePropertyName("virtualMachineConfiguration"u8); + writer.WriteObjectValue(VirtualMachineConfiguration, options); + } if (Optional.IsDefined(TargetNodeCommunicationMode)) { writer.WritePropertyName("targetNodeCommunicationMode"u8); writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); } + if (Optional.IsDefined(TaskSlotsPerNode)) + { + writer.WritePropertyName("taskSlotsPerNode"u8); + writer.WriteNumberValue(TaskSlotsPerNode.Value); + } + if (Optional.IsDefined(TaskSchedulingPolicy)) + { + writer.WritePropertyName("taskSchedulingPolicy"u8); + writer.WriteObjectValue(TaskSchedulingPolicy, options); + } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsCollectionDefined(ResourceTags)) + { + writer.WritePropertyName("resourceTags"u8); + writer.WriteStartObject(); + foreach (var item in ResourceTags) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + if (Optional.IsCollectionDefined(UserAccounts)) + { + writer.WritePropertyName("userAccounts"u8); + writer.WriteStartArray(); + foreach (var item in UserAccounts) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(MountConfiguration)) + { + writer.WritePropertyName("mountConfiguration"u8); + writer.WriteStartArray(); + foreach (var item in MountConfiguration) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(UpgradePolicy)) + { + writer.WritePropertyName("upgradePolicy"u8); + writer.WriteObjectValue(UpgradePolicy, options); + } if (options.Format != "W" && 
_serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -101,14 +182,45 @@ internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonEle { return null; } + string displayName = default; + string vmSize = default; + bool? enableInterNodeCommunication = default; BatchStartTask startTask = default; + IList certificateReferences = default; IList applicationPackageReferences = default; IList metadata = default; + VirtualMachineConfiguration virtualMachineConfiguration = default; BatchNodeCommunicationMode? targetNodeCommunicationMode = default; + int? taskSlotsPerNode = default; + BatchTaskSchedulingPolicy taskSchedulingPolicy = default; + NetworkConfiguration networkConfiguration = default; + IDictionary resourceTags = default; + IList userAccounts = default; + IList mountConfiguration = default; + UpgradePolicy upgradePolicy = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("enableInterNodeCommunication"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableInterNodeCommunication = property.Value.GetBoolean(); + continue; + } if (property.NameEquals("startTask"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -118,6 +230,20 @@ internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonEle startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); continue; } + if (property.NameEquals("certificateReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); + } + certificateReferences = array; + continue; + } if (property.NameEquals("applicationPackageReferences"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -146,6 +272,15 @@ internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonEle metadata = array; continue; } + if (property.NameEquals("virtualMachineConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); + continue; + } if (property.NameEquals("targetNodeCommunicationMode"u8)) { if (property.Value.ValueKind == JsonValueKind.Null) @@ -155,13 +290,108 @@ internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonEle targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); continue; } + if (property.NameEquals("taskSlotsPerNode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSlotsPerNode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("taskSchedulingPolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSchedulingPolicy = BatchTaskSchedulingPolicy.DeserializeBatchTaskSchedulingPolicy(property.Value, options); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } 
+ networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("resourceTags"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + resourceTags = dictionary; + continue; + } + if (property.NameEquals("userAccounts"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(UserAccount.DeserializeUserAccount(item, options)); + } + userAccounts = array; + continue; + } + if (property.NameEquals("mountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Batch.MountConfiguration.DeserializeMountConfiguration(item, options)); + } + mountConfiguration = array; + continue; + } + if (property.NameEquals("upgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + upgradePolicy = UpgradePolicy.DeserializeUpgradePolicy(property.Value, options); + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolUpdateContent(startTask, applicationPackageReferences ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), targetNodeCommunicationMode, serializedAdditionalRawData); + return new BatchPoolUpdateContent( + displayName, + vmSize, + enableInterNodeCommunication, + startTask, + certificateReferences ?? new ChangeTrackingList(), + applicationPackageReferences ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), + virtualMachineConfiguration, + targetNodeCommunicationMode, + taskSlotsPerNode, + taskSchedulingPolicy, + networkConfiguration, + resourceTags ?? new ChangeTrackingDictionary(), + userAccounts ?? new ChangeTrackingList(), + mountConfiguration ?? new ChangeTrackingList(), + upgradePolicy, + serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs index 1300eb052c5c..f51418e24d54 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs @@ -48,32 +48,98 @@ public partial class BatchPoolUpdateContent /// Initializes a new instance of . public BatchPoolUpdateContent() { + CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); Metadata = new ChangeTrackingList(); + ResourceTags = new ChangeTrackingDictionary(); + UserAccounts = new ChangeTrackingList(); + MountConfiguration = new ChangeTrackingList(); } /// Initializes a new instance of . + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. + /// The size of virtual machines in the Pool. 
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).<br /><br />This field can be updated only when the pool is empty. + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.<br /><br />This field can be updated only when the pool is empty. /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. + /// + /// If this element is present, it replaces any existing Certificate references configured on the Pool. + /// If omitted, any existing Certificate references are left unchanged. + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. + /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. + /// The network configuration for the Pool. This field can be updated only when the pool is empty. 
+ /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. + /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.<br /><br />This field can be updated only when the pool is empty. /// Keeps track of any properties unknown to the library. - internal BatchPoolUpdateContent(BatchStartTask startTask, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + internal BatchPoolUpdateContent(string displayName, string vmSize, bool? enableInterNodeCommunication, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, VirtualMachineConfiguration virtualMachineConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, NetworkConfiguration networkConfiguration, IDictionary resourceTags, IList userAccounts, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { + DisplayName = displayName; + VmSize = vmSize; + EnableInterNodeCommunication = enableInterNodeCommunication; StartTask = startTask; + CertificateReferences = certificateReferences; ApplicationPackageReferences = applicationPackageReferences; Metadata = metadata; + VirtualMachineConfiguration = virtualMachineConfiguration; TargetNodeCommunicationMode = targetNodeCommunicationMode; + TaskSlotsPerNode = taskSlotsPerNode; + TaskSchedulingPolicy = taskSchedulingPolicy; + NetworkConfiguration = networkConfiguration; + ResourceTags = resourceTags; + UserAccounts = userAccounts; + MountConfiguration = mountConfiguration; + UpgradePolicy = upgradePolicy; _serializedAdditionalRawData = serializedAdditionalRawData; } + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. + public string DisplayName { get; set; } + /// The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).<br /><br />This field can be updated only when the pool is empty. + public string VmSize { get; set; } + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.<br /><br />This field can be updated only when the pool is empty. + public bool? 
EnableInterNodeCommunication { get; set; } /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. public BatchStartTask StartTask { get; set; } + /// + /// If this element is present, it replaces any existing Certificate references configured on the Pool. + /// If omitted, any existing Certificate references are left unchanged. + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// + public IList CertificateReferences { get; } /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. public IList ApplicationPackageReferences { get; } /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. public IList Metadata { get; } + /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. + public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing targetNodeCommunicationMode is left unchanged. public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. + public int? TaskSlotsPerNode { get; set; } + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. + public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } + /// The network configuration for the Pool. This field can be updated only when the pool is empty. + public NetworkConfiguration NetworkConfiguration { get; set; } + /// The user-specified tags associated with the pool.
The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. + public IDictionary ResourceTags { get; } + /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. + public IList UserAccounts { get; } + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. + public IList MountConfiguration { get; } + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.<br /><br />This field can be updated only when the pool is empty. + public UpgradePolicy UpgradePolicy { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs index c28463c07907..66fee7143432 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs @@ -49,7 +49,7 @@ public partial class BatchPoolUsageMetrics /// The ID of the Pool whose metrics are aggregated in this entry. /// The start time of the aggregation interval covered by this entry. /// The end time of the aggregation interval covered by this entry. - /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total core hours used in the Pool during this aggregation interval. /// or is null. internal BatchPoolUsageMetrics(string poolId, DateTimeOffset startTime, DateTimeOffset endTime, string vmSize, float totalCoreHours) @@ -68,7 +68,7 @@ internal BatchPoolUsageMetrics(string poolId, DateTimeOffset startTime, DateTime /// The ID of the Pool whose metrics are aggregated in this entry. /// The start time of the aggregation interval covered by this entry. /// The end time of the aggregation interval covered by this entry. - /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total core hours used in the Pool during this aggregation interval. /// Keeps track of any properties unknown to the library. 
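To illustrate the expanded BatchPoolUpdateContent surface above, here is a minimal usage sketch. It assumes the type keeps a public parameterless constructor and that BatchClient exposes an UpdatePoolAsync overload accepting this model; neither is shown in this hunk, and the endpoint, pool ID, and value names are placeholders or assumptions.

    // Hypothetical sketch: patch an empty pool with the newly updatable settings.
    // Assumes a public parameterless BatchPoolUpdateContent constructor and an
    // UpdatePoolAsync(string poolId, BatchPoolUpdateContent content) overload on BatchClient.
    using Azure.Identity;
    using Azure.Compute.Batch;

    var client = new BatchClient(
        new Uri("https://<accountname>.<region>.batch.azure.com"),   // placeholder endpoint
        new DefaultAzureCredential());

    var update = new BatchPoolUpdateContent
    {
        // These fields can be changed only while the pool has no Compute Nodes.
        VmSize = "standard_d2s_v3",
        TaskSlotsPerNode = 2,
        EnableInterNodeCommunication = false,
        TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified,   // assumed value name
    };

    await client.UpdatePoolAsync("<poolId>", update);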
internal BatchPoolUsageMetrics(string poolId, DateTimeOffset startTime, DateTimeOffset endTime, string vmSize, float totalCoreHours, IDictionary serializedAdditionalRawData) @@ -92,7 +92,7 @@ internal BatchPoolUsageMetrics() public DateTimeOffset StartTime { get; } /// The end time of the aggregation interval covered by this entry. public DateTimeOffset EndTime { get; } - /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). public string VmSize { get; } /// The total core hours used in the Pool during this aggregation interval. public float TotalCoreHours { get; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs index d83549ff0698..4adb269470aa 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs @@ -60,7 +60,7 @@ public partial class BatchStartTask private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// is null. public BatchStartTask(string commandLine) { @@ -72,7 +72,7 @@ public BatchStartTask(string commandLine) } /// Initializes a new instance of . - /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the StartTask. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. /// A list of environment variable settings for the StartTask. @@ -97,7 +97,7 @@ internal BatchStartTask() { } - /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; set; } /// The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
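The commandLine guidance above (no implicit shell, relative paths, Batch-provided environment variables) is the part users most often trip over, so a small sketch follows. Only the BatchStartTask(string commandLine) constructor is taken from this patch; the command lines themselves are illustrative.

    // Illustrative only: wrap the command in a shell so environment variable expansion works,
    // exactly as the doc comment above recommends.
    var linuxStartTask = new BatchStartTask("/bin/sh -c 'echo node root: $AZ_BATCH_NODE_ROOT_DIR'");

    // Windows equivalent of the same pattern.
    var windowsStartTask = new BatchStartTask("cmd /c \"echo node root: %AZ_BATCH_NODE_ROOT_DIR%\"");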
public BatchTaskContainerSettings ContainerSettings { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs index 3f38ef61f36d..db112fb7f7ae 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs @@ -76,7 +76,7 @@ public BatchTask() /// The time at which the Task entered its current state. /// The previous state of the Task. This property is not set if the Task is in its initial Active state. /// The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. 
/// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. @@ -147,7 +147,7 @@ internal BatchTask(string id, string displayName, string url, string eTag, DateT public BatchTaskState? PreviousState { get; } /// The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. public DateTimeOffset? PreviousStateTransitionTime { get; } - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; } /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
public BatchTaskContainerSettings ContainerSettings { get; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs index eecb5b7f4def..8aefe985e64b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs @@ -51,6 +51,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("workingDirectory"u8); writer.WriteStringValue(WorkingDirectory.Value.ToString()); } + if (Optional.IsCollectionDefined(ContainerHostBatchBindMounts)) + { + writer.WritePropertyName("containerHostBatchBindMounts"u8); + writer.WriteStartArray(); + foreach (var item in ContainerHostBatchBindMounts) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -92,6 +102,7 @@ internal static BatchTaskContainerSettings DeserializeBatchTaskContainerSettings string imageName = default; ContainerRegistryReference registry = default; ContainerWorkingDirectory? workingDirectory = default; + IList containerHostBatchBindMounts = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -124,13 +135,33 @@ internal static BatchTaskContainerSettings DeserializeBatchTaskContainerSettings workingDirectory = new ContainerWorkingDirectory(property.Value.GetString()); continue; } + if (property.NameEquals("containerHostBatchBindMounts"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ContainerHostBatchBindMountEntry.DeserializeContainerHostBatchBindMountEntry(item, options)); + } + containerHostBatchBindMounts = array; + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new BatchTaskContainerSettings(containerRunOptions, imageName, registry, workingDirectory, serializedAdditionalRawData); + return new BatchTaskContainerSettings( + containerRunOptions, + imageName, + registry, + workingDirectory, + containerHostBatchBindMounts ?? new ChangeTrackingList(), + serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs index 9ecfdf09e22b..f528edcc8753 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs @@ -53,6 +53,7 @@ public BatchTaskContainerSettings(string imageName) Argument.AssertNotNull(imageName, nameof(imageName)); ImageName = imageName; + ContainerHostBatchBindMounts = new ChangeTrackingList(); } /// Initializes a new instance of . @@ -60,13 +61,15 @@ public BatchTaskContainerSettings(string imageName) /// The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". 
If no tag is provided as part of the Image name, the tag ":latest" is used as a default. /// The private registry which contains the container Image. This setting can be omitted if it was already provided at Pool creation. /// The location of the container Task working directory. The default is 'taskWorkingDirectory'. + /// The paths you want to mount into the container Task. If this array is null or not present, the container Task mounts the entire temporary disk drive on Windows (or AZ_BATCH_NODE_ROOT_DIR on Linux). No data paths are mounted into the container if this array is set to an empty list. /// Keeps track of any properties unknown to the library. - internal BatchTaskContainerSettings(string containerRunOptions, string imageName, ContainerRegistryReference registry, ContainerWorkingDirectory? workingDirectory, IDictionary serializedAdditionalRawData) + internal BatchTaskContainerSettings(string containerRunOptions, string imageName, ContainerRegistryReference registry, ContainerWorkingDirectory? workingDirectory, IList containerHostBatchBindMounts, IDictionary serializedAdditionalRawData) { ContainerRunOptions = containerRunOptions; ImageName = imageName; Registry = registry; WorkingDirectory = workingDirectory; + ContainerHostBatchBindMounts = containerHostBatchBindMounts; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -83,5 +86,7 @@ internal BatchTaskContainerSettings() public ContainerRegistryReference Registry { get; set; } /// The location of the container Task working directory. The default is 'taskWorkingDirectory'. public ContainerWorkingDirectory? WorkingDirectory { get; set; } + /// The paths you want to mount into the container Task. If this array is null or not present, the container Task mounts the entire temporary disk drive on Windows (or AZ_BATCH_NODE_ROOT_DIR on Linux). No data paths are mounted into the container if this array is set to an empty list. + public IList ContainerHostBatchBindMounts { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs index 88818560e6b8..2f6b5bda02f0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs @@ -47,7 +47,7 @@ public partial class BatchTaskCreateContent /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task.
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// or is null. public BatchTaskCreateContent(string id, string commandLine) { @@ -66,7 +66,7 @@ public BatchTaskCreateContent(string id, string commandLine) /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// How the Batch service should respond when the Task completes. - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. 
Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. @@ -112,7 +112,7 @@ internal BatchTaskCreateContent() public string DisplayName { get; set; } /// How the Batch service should respond when the Task completes. public ExitConditions ExitConditions { get; set; } - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). public string CommandLine { get; } /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. 
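To show how the new containerHostBatchBindMounts surface from this patch fits together with task creation, here is a hedged sketch. The BatchTaskCreateContent(id, commandLine) and BatchTaskContainerSettings(imageName) constructors and the ContainerHostBatchBindMounts collection appear in this patch; the ContainerHostBatchBindMountEntry member names (Source, IsReadOnly) and the ContainerHostDataPath.Task value are assumptions based on the matching REST schema and may differ in the generated code.

    // Hypothetical sketch: run a task in a container and mount only the Task data path.
    var task = new BatchTaskCreateContent("task-1", "/bin/sh -c 'ls $AZ_BATCH_TASK_WORKING_DIR'")
    {
        ContainerSettings = new BatchTaskContainerSettings("ubuntu:22.04"),
    };

    // With a non-empty list, only the named host data paths are mounted into the container;
    // an explicitly empty list mounts nothing (see the doc comment earlier in this patch).
    task.ContainerSettings.ContainerHostBatchBindMounts.Add(new ContainerHostBatchBindMountEntry
    {
        Source = ContainerHostDataPath.Task,   // assumed member and value names, not shown in this hunk
        IsReadOnly = true,                     // assumed member name
    });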
public BatchTaskContainerSettings ContainerSettings { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs index 4085d418d223..a88ddf68652e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs @@ -47,9 +47,9 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteNumberValue(ReadIOps); + writer.WriteStringValue(ReadIOps.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteNumberValue(WriteIOps); + writer.WriteStringValue(WriteIOps.ToString()); writer.WritePropertyName("readIOGiB"u8); writer.WriteNumberValue(ReadIOGiB); writer.WritePropertyName("writeIOGiB"u8); @@ -140,12 +140,12 @@ internal static BatchTaskStatistics DeserializeBatchTaskStatistics(JsonElement e } if (property.NameEquals("readIOps"u8)) { - readIOps = property.Value.GetInt64(); + readIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("writeIOps"u8)) { - writeIOps = property.Value.GetInt64(); + writeIOps = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("readIOGiB"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs index 51e40bc9f43a..2391a55dc591 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs @@ -60,7 +60,7 @@ public static BatchErrorDetail BatchErrorDetail(string key = null, string value /// The ID of the Pool whose metrics are aggregated in this entry. /// The start time of the aggregation interval covered by this entry. /// The end time of the aggregation interval covered by this entry. - /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total core hours used in the Pool during this aggregation interval. /// A new instance for mocking. public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, DateTimeOffset startTime = default, DateTimeOffset endTime = default, string vmSize = null, float totalCoreHours = default) @@ -77,18 +77,24 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). /// The display name for the Pool. 
The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. - /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + /// A formula for the desired number of Compute Nodes in the Pool. 
This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. @@ -98,9 +104,10 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// A new instance for mocking. - public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? 
resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) { resourceTags ??= new Dictionary(); + certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); metadata ??= new List(); @@ -121,6 +128,7 @@ public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, st enableInterNodeCommunication, networkConfiguration, startTask, + certificateReferences?.ToList(), applicationPackageReferences?.ToList(), taskSlotsPerNode, taskSchedulingPolicy, @@ -137,10 +145,12 @@ public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, st /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. - /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. 
For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. + /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. + /// The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. /// A new instance for mocking. - public static ImageReference ImageReference(string publisher = null, string offer = null, string sku = null, string version = null, string virtualMachineImageId = null, string exactVersion = null) + public static ImageReference ImageReference(string publisher = null, string offer = null, string sku = null, string version = null, string virtualMachineImageId = null, string exactVersion = null, string sharedGalleryImageId = null, string communityGalleryImageId = null) { return new ImageReference( publisher, @@ -149,6 +159,8 @@ public static ImageReference ImageReference(string publisher = null, string offe version, virtualMachineImageId, exactVersion, + sharedGalleryImageId, + communityGalleryImageId, serializedAdditionalRawData: null); } @@ -163,7 +175,7 @@ public static ImageReference ImageReference(string publisher = null, string offe /// The time at which the Pool entered its current state. /// Whether the Pool is resizing. /// The time at which the Pool entered its current allocation state. - /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes, see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). 
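Since the ImageReference factory overload above now accepts the shared and community gallery identifiers, a short mocking sketch may help; only the factory signature shown above is relied on, and the identifier strings are placeholders.

    // Sketch: fabricate an ImageReference in a unit test via the model factory,
    // exercising the new gallery identifier parameter. Identifier values are placeholders.
    ImageReference image = ComputeBatchModelFactory.ImageReference(
        sharedGalleryImageId: "/SharedGalleries/<galleryUniqueName>/Images/<imageName>/Versions/<version>",
        exactVersion: "1.0.0");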
/// The virtual machine configuration for the Pool. This property must be specified. /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. @@ -179,22 +191,29 @@ public static ImageReference ImageReference(string publisher = null, string offe /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes to be allocated in the Pool. /// The network configuration for the Pool. /// A Task specified to run on each Compute Node as it joins the Pool. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. - /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. 
The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// A new instance for mocking. - public static BatchPool BatchPool(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics stats = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + public static BatchPool BatchPool(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics stats = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? 
currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) { resizeErrors ??= new List(); resourceTags ??= new Dictionary(); + certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); metadata ??= new List(); @@ -227,6 +246,7 @@ public static BatchPool BatchPool(string id = null, string displayName = null, s enableInterNodeCommunication, networkConfiguration, startTask, + certificateReferences?.ToList(), applicationPackageReferences?.ToList(), taskSlotsPerNode, taskSchedulingPolicy, @@ -413,10 +433,12 @@ public static BatchPoolNodeCounts BatchPoolNodeCounts(string poolId = null, Batc /// The number of Compute Nodes in the unknown state. /// The number of Compute Nodes in the unusable state. /// The number of Compute Nodes in the waitingForStartTask state. + /// The number of Compute Nodes in the deallocated state. + /// The number of Compute Nodes in the deallocating state. /// The total number of Compute Nodes. /// The number of Compute Nodes in the upgradingOS state. /// A new instance for mocking. - public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = default, int offline = default, int preempted = default, int rebooting = default, int reimaging = default, int running = default, int starting = default, int startTaskFailed = default, int leavingPool = default, int unknown = default, int unusable = default, int waitingForStartTask = default, int total = default, int upgradingOs = default) + public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = default, int offline = default, int preempted = default, int rebooting = default, int reimaging = default, int running = default, int starting = default, int startTaskFailed = default, int leavingPool = default, int unknown = default, int unusable = default, int waitingForStartTask = default, int deallocated = default, int deallocating = default, int total = default, int upgradingOs = default) { return new BatchNodeCounts( creating, @@ -432,6 +454,8 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = unknown, unusable, waitingForStartTask, + deallocated, + deallocating, total, upgradingOs, serializedAdditionalRawData: null); @@ -463,7 +487,7 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. - /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A new instance for mocking. public static BatchJob BatchJob(string id = null, string displayName = null, bool? usesTaskDependencies = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? 
creationTime = null, BatchJobState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, int? priority = null, bool? allowTaskPreemption = null, int? maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, OnAllBatchTasksComplete? onAllTasksComplete = null, OnBatchTaskFailure? onTaskFailure = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null, BatchJobExecutionInfo executionInfo = null, BatchJobStatistics stats = null) { @@ -752,6 +776,76 @@ public static BatchTaskSlotCounts BatchTaskSlotCounts(int active = default, int serializedAdditionalRawData: null); } + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// The URL of the Certificate. + /// The state of the Certificate. + /// The time at which the Certificate entered its current state. + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + /// The public part of the Certificate as a base-64 encoded .cer file. + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. + /// The base64-encoded contents of the Certificate. The maximum size is 10KB. + /// The format of the Certificate data. + /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. + /// A new instance for mocking. + public static BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, string url = null, BatchCertificateState? state = null, DateTimeOffset? stateTransitionTime = null, BatchCertificateState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string publicData = null, DeleteBatchCertificateError deleteCertificateError = null, string data = null, BatchCertificateFormat? certificateFormat = null, string password = null) + { + return new BatchCertificate( + thumbprint, + thumbprintAlgorithm, + url, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + publicData, + deleteCertificateError, + data, + certificateFormat, + password, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. + /// A new instance for mocking. 
+ public static DeleteBatchCertificateError DeleteBatchCertificateError(string code = null, string message = null, IEnumerable values = null) + { + values ??= new List(); + + return new DeleteBatchCertificateError(code, message, values?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// The URL of the Certificate. + /// The state of the Certificate. + /// The time at which the Certificate entered its current state. + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + /// The public part of the Certificate as a base-64 encoded .cer file. + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. + /// A new instance for mocking. + public static GetCertificateResponse GetCertificateResponse(string thumbprint = null, string thumbprintAlgorithm = null, string url = null, BatchCertificateState? state = null, DateTimeOffset? stateTransitionTime = null, BatchCertificateState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string publicData = null, DeleteBatchCertificateError deleteCertificateError = null) + { + return new GetCertificateResponse( + thumbprint, + thumbprintAlgorithm, + url, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + publicData, + deleteCertificateError, + serializedAdditionalRawData: null); + } + /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. /// The display name for the schedule. @@ -871,7 +965,7 @@ public static BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// How the Batch service should respond when the Task completes. - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. 
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. @@ -924,7 +1018,7 @@ public static BatchTaskCreateContent BatchTaskCreateContent(string id = null, st /// The time at which the Task entered its current state. /// The previous state of the Task. This property is not set if the Task is in its initial Active state. /// The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. - /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The command line of the Task. 
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. @@ -1152,7 +1246,7 @@ public static FileProperties FileProperties(DateTimeOffset? creationTime = null, /// The user name of the Account. /// Whether the Account should be an administrator on the Compute Node. The default value is false. /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. - /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// A new instance for mocking. 
public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name = null, bool? isAdmin = null, DateTimeOffset? expiryTime = null, string password = null, string sshPublicKey = null) @@ -1176,7 +1270,7 @@ public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. - /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. /// The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. /// The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. @@ -1184,15 +1278,22 @@ public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. /// The Task specified to run on the Compute Node as it joins the Pool. /// Runtime information about the execution of the StartTask on the Compute Node. + /// + /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. + /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. + /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. + /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. + /// /// The list of errors that are currently being encountered by the Compute Node. /// Whether this Compute Node is a dedicated Compute Node. 
If false, the Compute Node is a Spot/Low-priority Compute Node. /// The endpoint configuration for the Compute Node. /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. /// A new instance for mocking. - public static BatchNode BatchNode(string id = null, string url = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, string ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) + public static BatchNode BatchNode(string id = null, string url = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, string ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable certificateReferences = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) { recentTasks ??= new List(); + certificateReferences ??= new List(); errors ??= new List(); return new BatchNode( @@ -1213,6 +1314,7 @@ public static BatchNode BatchNode(string id = null, string url = null, BatchNode recentTasks?.ToList(), startTask, startTaskInfo, + certificateReferences?.ToList(), errors?.ToList(), isDedicated, endpointConfiguration, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.Serialization.cs new file mode 100644 index 000000000000..b8561224406c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.Serialization.cs @@ -0,0 +1,164 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ContainerHostBatchBindMountEntry : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. 
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerHostBatchBindMountEntry)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(Source)) + { + writer.WritePropertyName("source"u8); + writer.WriteStringValue(Source.Value.ToString()); + } + if (Optional.IsDefined(IsReadOnly)) + { + writer.WritePropertyName("isReadOnly"u8); + writer.WriteBooleanValue(IsReadOnly.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + ContainerHostBatchBindMountEntry IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerHostBatchBindMountEntry)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeContainerHostBatchBindMountEntry(document.RootElement, options); + } + + internal static ContainerHostBatchBindMountEntry DeserializeContainerHostBatchBindMountEntry(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ContainerHostDataPath? source = default; + bool? isReadOnly = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("source"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + source = new ContainerHostDataPath(property.Value.GetString()); + continue; + } + if (property.NameEquals("isReadOnly"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + isReadOnly = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ContainerHostBatchBindMountEntry(source, isReadOnly, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ContainerHostBatchBindMountEntry)} does not support writing '{options.Format}' format."); + } + } + + ContainerHostBatchBindMountEntry IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeContainerHostBatchBindMountEntry(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ContainerHostBatchBindMountEntry)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ContainerHostBatchBindMountEntry FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeContainerHostBatchBindMountEntry(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.cs new file mode 100644 index 000000000000..129e36e79f3d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostBatchBindMountEntry.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The path and mount mode to mount into the task container. + public partial class ContainerHostBatchBindMountEntry + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ContainerHostBatchBindMountEntry() + { + } + + /// Initializes a new instance of . + /// The path to be mounted into the container; the customer can select which path to mount. + /// Whether to mount this source path as read-only. The default value is false (read/write mode). For Linux, mounting this path in read/write mode does not mean that all users in the container have read/write access to the path; that depends on the access configured on the host VM. If this path is mounted read-only, all users within the container will be unable to modify the path. + /// Keeps track of any properties unknown to the library. + internal ContainerHostBatchBindMountEntry(ContainerHostDataPath? source, bool?
isReadOnly, IDictionary serializedAdditionalRawData) + { + Source = source; + IsReadOnly = isReadOnly; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The path to be mounted into the container; the customer can select which path to mount. + public ContainerHostDataPath? Source { get; set; } + /// Whether to mount this source path as read-only. The default value is false (read/write mode). For Linux, mounting this path in read/write mode does not mean that all users in the container have read/write access to the path; that depends on the access configured on the host VM. If this path is mounted read-only, all users within the container will be unable to modify the path. + public bool? IsReadOnly { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostDataPath.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostDataPath.cs new file mode 100644 index 000000000000..db0708edc775 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerHostDataPath.cs @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// The paths that can be mounted into a container task's container. + public readonly partial struct ContainerHostDataPath : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ContainerHostDataPath(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SharedValue = "Shared"; + private const string StartupValue = "Startup"; + private const string VfsMountsValue = "VfsMounts"; + private const string TaskValue = "Task"; + private const string JobPrepValue = "JobPrep"; + private const string ApplicationsValue = "Applications"; + + /// The path for multi-instance tasks to share their files. + public static ContainerHostDataPath Shared { get; } = new ContainerHostDataPath(SharedValue); + /// The path for the start task. + public static ContainerHostDataPath Startup { get; } = new ContainerHostDataPath(StartupValue); + /// The path containing all virtual file systems mounted on this node. + public static ContainerHostDataPath VfsMounts { get; } = new ContainerHostDataPath(VfsMountsValue); + /// The task path. + public static ContainerHostDataPath Task { get; } = new ContainerHostDataPath(TaskValue); + /// The job-prep task path. + public static ContainerHostDataPath JobPrep { get; } = new ContainerHostDataPath(JobPrepValue); + /// The applications path. + public static ContainerHostDataPath Applications { get; } = new ContainerHostDataPath(ApplicationsValue); + /// Determines if two values are the same. + public static bool operator ==(ContainerHostDataPath left, ContainerHostDataPath right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ContainerHostDataPath left, ContainerHostDataPath right) => !left.Equals(right); + /// Converts a to a .
+ public static implicit operator ContainerHostDataPath(string value) => new ContainerHostDataPath(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ContainerHostDataPath other && Equals(other); + /// + public bool Equals(ContainerHostDataPath other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs new file mode 100644 index 000000000000..2d5afec3081a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs @@ -0,0 +1,181 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class DeleteBatchCertificateError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(Values)) + { + writer.WritePropertyName("values"u8); + writer.WriteStartArray(); + foreach (var item in Values) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + DeleteBatchCertificateError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDeleteBatchCertificateError(document.RootElement, options); + } + + internal static DeleteBatchCertificateError DeserializeDeleteBatchCertificateError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string message = default; + IReadOnlyList values = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("values"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + values = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new DeleteBatchCertificateError(code, message, values ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support writing '{options.Format}' format."); + } + } + + DeleteBatchCertificateError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeDeleteBatchCertificateError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static DeleteBatchCertificateError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeDeleteBatchCertificateError(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs new file mode 100644 index 000000000000..78ecee1fc422 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error encountered by the Batch service when deleting a Certificate. + public partial class DeleteBatchCertificateError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal DeleteBatchCertificateError() + { + Values = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. + /// Keeps track of any properties unknown to the library. + internal DeleteBatchCertificateError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + Values = values; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; } + /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. + public string Message { get; } + /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. 
+ public IReadOnlyList Values { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs index 4a31115ce568..0729c6609146 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs @@ -10,7 +10,7 @@ namespace Azure.Compute.Batch { - /// AccessDiffDiskPlacementScope enums. + /// Specifies the ephemeral disk placement for operating system disk for all compute nodes (VMs) in the pool. This property can be used by user in the request to choose which location the operating system should be in. e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. public readonly partial struct DiffDiskPlacement : IEquatable { private readonly string _value; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs index 03ec91b162ed..0ae2222fd149 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs @@ -54,7 +54,7 @@ public DiffDiskSettings() } /// Initializes a new instance of . - /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. /// Keeps track of any properties unknown to the library. internal DiffDiskSettings(DiffDiskPlacement? placement, IDictionary serializedAdditionalRawData) { @@ -62,7 +62,7 @@ internal DiffDiskSettings(DiffDiskPlacement? placement, IDictionary Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. 
+ /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by the user in the request to choose the location, e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. public DiffDiskPlacement? Placement { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs index 1a8f6a27f6ca..2ad49f0b0d74 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs @@ -56,7 +56,7 @@ public DiskEncryptionConfiguration() } /// Initializes a new instance of . - /// The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + /// The list of disk targets Batch Service will encrypt on the compute node. /// Keeps track of any properties unknown to the library. internal DiskEncryptionConfiguration(IList targets, IDictionary serializedAdditionalRawData) { @@ -64,7 +64,7 @@ internal DiskEncryptionConfiguration(IList targets, IDicti _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + /// The list of disk targets Batch Service will encrypt on the compute node.
public IList Targets { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml index 105b11993153..4c1259a767f9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml @@ -151,7 +151,10 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_ }, Caching = CachingType.ReadWrite, DiskSizeGB = 100, - ManagedDisk = new ManagedDisk(StorageAccountType.StandardSSDLRS), + ManagedDisk = new ManagedDisk + { + StorageAccountType = StorageAccountType.StandardSSDLRS, + }, }, }, ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -427,7 +430,10 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_ }, Caching = CachingType.ReadWrite, DiskSizeGB = 100, - ManagedDisk = new ManagedDisk(StorageAccountType.StandardSSDLRS), + ManagedDisk = new ManagedDisk + { + StorageAccountType = StorageAccountType.StandardSSDLRS, + }, }, }, ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -1977,7 +1983,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty()) +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -1992,7 +1998,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty()) +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2013,6 +2019,7 @@ using RequestContent content = RequestContent.Create(new { commandLine = "/bin/bash -c 'echo start task'", }, + certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -2035,6 +2042,7 @@ using RequestContent content = RequestContent.Create(new { commandLine = "/bin/bash -c 'echo start task'", }, + certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -2109,7 +2117,7 @@ Response response = client.RemoveNodes("poolId", content); Console.WriteLine(response.Status); ]]> - + This sample shows how to call DeleteJobAsync. - + This sample shows how to call DeleteJob. - + This sample shows how to call TerminateJobAsync. - + This sample shows how to call TerminateJob. - + This sample shows how to call TerminateJobAsync. - + This sample shows how to call TerminateJob. - + + +This sample shows how to call CreateCertificateAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") +{ + CertificateFormat = BatchCertificateFormat.Pfx, + Password = "", +}; +Response response = await client.CreateCertificateAsync(certificate); +]]> + + + +This sample shows how to call CreateCertificate. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") +{ + CertificateFormat = BatchCertificateFormat.Pfx, + Password = "", +}; +Response response = client.CreateCertificate(certificate); +]]> + + + +This sample shows how to call CreateCertificateAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + thumbprintAlgorithm = "sha1", + thumbprint = "0123456789abcdef0123456789abcdef01234567", + data = "#####...", + certificateFormat = "pfx", + password = "", +}); +Response response = await client.CreateCertificateAsync(content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateCertificate. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + thumbprintAlgorithm = "sha1", + thumbprint = "0123456789abcdef0123456789abcdef01234567", + data = "#####...", + certificateFormat = "pfx", + password = "", +}); +Response response = client.CreateCertificate(content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CancelCertificateDeletionAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.CancelCertificateDeletionAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CancelCertificateDeletion. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.CancelCertificateDeletion("sha1", "0123456789abcdef0123456789abcdef01234567"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteCertificateAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteCertificate. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetCertificateAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); +]]> + + + +This sample shows how to call GetCertificate. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); +]]> + + + +This sample shows how to call GetCertificateAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("thumbprint").ToString()); +Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); +]]> + + + +This sample shows how to call GetCertificate and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("thumbprint").ToString()); +Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); +]]> + + This sample shows how to call DeleteJobScheduleAsync. - + This sample shows how to call DeleteJobSchedule. - + This sample shows how to call TerminateJobScheduleAsync. - + This sample shows how to call TerminateJobSchedule. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") +BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") { - ExitConditions = new ExitConditions + ContainerSettings = new BatchTaskContainerSettings("ubuntu") { - ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry { - JobAction = BatchJobAction.Terminate, - })}, + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }}, }, UserIdentity = new UserIdentity { @@ -4125,35 +4464,19 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") -{ - RequiredSlots = 2, -}; -Response response = await client.CreateTaskAsync("jobId", task); -]]> - - - -This sample shows how to call CreateTask. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); -Response response = client.CreateTask("jobId", task); -]]> -This sample shows how to call CreateTask. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { - ContainerRunOptions = "--rm", + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry + { + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }, new ContainerHostBatchBindMountEntry + { + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }}, }, UserIdentity = new UserIdentity { @@ -4164,9 +4487,9 @@ BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'ech }, }, }; -Response response = client.CreateTask("jobId", task); +Response response = await client.CreateTaskAsync("jobId", task); ]]> -This sample shows how to call CreateTask. +This sample shows how to call CreateTaskAsync. "); TokenCredential credential = new DefaultAzureCredential(); @@ -4190,9 +4513,9 @@ BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit }, }, }; -Response response = client.CreateTask("jobId", task); +Response response = await client.CreateTaskAsync("jobId", task); ]]> -This sample shows how to call CreateTask. +This sample shows how to call CreateTaskAsync. "); TokenCredential credential = new DefaultAzureCredential(); @@ -4202,21 +4525,152 @@ BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo t { RequiredSlots = 2, }; -Response response = client.CreateTask("jobId", task); +Response response = await client.CreateTaskAsync("jobId", task); ]]> - + -This sample shows how to call CreateTaskAsync. +This sample shows how to call CreateTask. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -using RequestContent content = RequestContent.Create(new -{ - id = "task1", - commandLine = "cmd /c echo task1", +BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); +Response response = client.CreateTask("jobId", task); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +{ + ContainerSettings = new BatchTaskContainerSettings("ubuntu") + { + ContainerRunOptions = "--rm", + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, +}; +Response response = client.CreateTask("jobId", task); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +{ + ContainerSettings = new BatchTaskContainerSettings("ubuntu") + { + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry + { + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, +}; +Response response = client.CreateTask("jobId", task); +]]> +This sample shows how to call CreateTask. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +{ + ContainerSettings = new BatchTaskContainerSettings("ubuntu") + { + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry + { + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }, new ContainerHostBatchBindMountEntry + { + Source = ContainerHostDataPath.Task, + IsReadOnly = true, + }}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, +}; +Response response = client.CreateTask("jobId", task); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") +{ + ExitConditions = new ExitConditions + { + ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions + { + JobAction = BatchJobAction.Terminate, + })}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, +}; +Response response = client.CreateTask("jobId", task); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") +{ + RequiredSlots = 2, +}; +Response response = client.CreateTask("jobId", task); +]]> + + + +This sample shows how to call CreateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "task1", + commandLine = "cmd /c echo task1", }); Response response = await client.CreateTaskAsync("jobId", content); @@ -4256,6 +4710,81 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); +using RequestContent content = RequestContent.Create(new +{ + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { + new + { + source = "Task", + isReadOnly = true, + } + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, +}); +Response response = await client.CreateTaskAsync("jobId", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTaskAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { + new + { + source = "Task", + isReadOnly = true, + }, + new + { + source = "Task", + isReadOnly = true, + } + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, +}); +Response response = await client.CreateTaskAsync("jobId", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + using RequestContent content = RequestContent.Create(new { id = "taskId", @@ -4356,6 +4885,81 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); +using RequestContent content = RequestContent.Create(new +{ + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { + new + { + source = "Task", + isReadOnly = true, + } + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, +}); +Response response = client.CreateTask("jobId", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { + new + { + source = "Task", + isReadOnly = true, + }, + new + { + source = "Task", + isReadOnly = true, + } + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, +}); +Response response = client.CreateTask("jobId", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + using RequestContent content = RequestContent.Create(new { id = "taskId", @@ -5282,6 +5886,132 @@ BatchClient client = new BatchClient(endpoint, credential); using RequestContent content = null; Response response = client.RebootNode("poolId", "tvm-1695681911_1-20161122t193202z", content); +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call StartNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.StartNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call StartNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.StartNode("poolId", "tvm-1695681911_1-20161122t193202z"); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReimageNodeAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); +]]> + + + +This sample shows how to call ReimageNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z"); +]]> + + + +This sample shows how to call ReimageNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReimageNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z", content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeallocateNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); +]]> + + + +This sample shows how to call DeallocateNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z"); +]]> + + + +This sample shows how to call DeallocateNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeallocateNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z", content); + Console.WriteLine(response.Status); ]]> @@ -6067,6 +6797,66 @@ foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("jobI JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; Console.WriteLine(result.ToString()); } +]]> + + + +This sample shows how to call GetCertificatesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchCertificate item in client.GetCertificatesAsync()) +{ +} +]]> + + + +This sample shows how to call GetCertificates. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchCertificate item in client.GetCertificates()) +{ +} +]]> + + + +This sample shows how to call GetCertificatesAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetCertificatesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + Console.WriteLine(result.GetProperty("data").ToString()); +} +]]> + + + +This sample shows how to call GetCertificates and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetCertificates(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + Console.WriteLine(result.GetProperty("data").ToString()); +} ]]> diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs index c8a9fbc9d86d..1fd13732e92a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs @@ -42,7 +42,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("lastModified"u8); writer.WriteStringValue(LastModified, "O"); writer.WritePropertyName("contentLength"u8); - writer.WriteNumberValue(ContentLength); + writer.WriteStringValue(ContentLength.ToString()); if (Optional.IsDefined(ContentType)) { writer.WritePropertyName("contentType"u8); @@ -115,7 +115,7 @@ internal static FileProperties DeserializeFileProperties(JsonElement element, Mo } if (property.NameEquals("contentLength"u8)) { - contentLength = property.Value.GetInt64(); + contentLength = long.Parse(property.Value.GetString()); continue; } if (property.NameEquals("contentType"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.Serialization.cs new file mode 100644 index 000000000000..36f85dc7ef04 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.Serialization.cs @@ -0,0 +1,257 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class GetCertificateResponse : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. 
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(GetCertificateResponse)} does not support writing '{format}' format."); + } + + writer.WritePropertyName("thumbprint"u8); + writer.WriteStringValue(Thumbprint); + writer.WritePropertyName("thumbprintAlgorithm"u8); + writer.WriteStringValue(ThumbprintAlgorithm); + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PublicData)) + { + writer.WritePropertyName("publicData"u8); + writer.WriteStringValue(PublicData); + } + if (options.Format != "W" && Optional.IsDefined(DeleteCertificateError)) + { + writer.WritePropertyName("deleteCertificateError"u8); + writer.WriteObjectValue(DeleteCertificateError, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + GetCertificateResponse IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(GetCertificateResponse)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeGetCertificateResponse(document.RootElement, options); + } + + internal static GetCertificateResponse DeserializeGetCertificateResponse(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string thumbprint = default; + string thumbprintAlgorithm = default; + string url = default; + BatchCertificateState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchCertificateState? previousState = default; + DateTimeOffset? 
previousStateTransitionTime = default; + string publicData = default; + DeleteBatchCertificateError deleteCertificateError = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("thumbprint"u8)) + { + thumbprint = property.Value.GetString(); + continue; + } + if (property.NameEquals("thumbprintAlgorithm"u8)) + { + thumbprintAlgorithm = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchCertificateState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchCertificateState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("publicData"u8)) + { + publicData = property.Value.GetString(); + continue; + } + if (property.NameEquals("deleteCertificateError"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + deleteCertificateError = DeleteBatchCertificateError.DeserializeDeleteBatchCertificateError(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new GetCertificateResponse( + thumbprint, + thumbprintAlgorithm, + url, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + publicData, + deleteCertificateError, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(GetCertificateResponse)} does not support writing '{options.Format}' format."); + } + } + + GetCertificateResponse IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeGetCertificateResponse(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(GetCertificateResponse)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static GetCertificateResponse FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeGetCertificateResponse(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.cs new file mode 100644 index 000000000000..04dc1d51de1c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/GetCertificateResponse.cs @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The GetCertificateResponse. + public partial class GetCertificateResponse + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// or is null. + internal GetCertificateResponse(string thumbprint, string thumbprintAlgorithm) + { + Argument.AssertNotNull(thumbprint, nameof(thumbprint)); + Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); + + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + } + + /// Initializes a new instance of . + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + /// The algorithm used to derive the thumbprint. This must be sha1. + /// The URL of the Certificate. + /// The state of the Certificate. + /// The time at which the Certificate entered its current state. + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + /// The public part of the Certificate as a base-64 encoded .cer file. + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. + /// Keeps track of any properties unknown to the library. + internal GetCertificateResponse(string thumbprint, string thumbprintAlgorithm, string url, BatchCertificateState? state, DateTimeOffset? stateTransitionTime, BatchCertificateState? previousState, DateTimeOffset? 
previousStateTransitionTime, string publicData, DeleteBatchCertificateError deleteCertificateError, IDictionary serializedAdditionalRawData) + { + Thumbprint = thumbprint; + ThumbprintAlgorithm = thumbprintAlgorithm; + Url = url; + State = state; + StateTransitionTime = stateTransitionTime; + PreviousState = previousState; + PreviousStateTransitionTime = previousStateTransitionTime; + PublicData = publicData; + DeleteCertificateError = deleteCertificateError; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal GetCertificateResponse() + { + } + + /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). + public string Thumbprint { get; } + /// The algorithm used to derive the thumbprint. This must be sha1. + public string ThumbprintAlgorithm { get; } + /// The URL of the Certificate. + public string Url { get; } + /// The state of the Certificate. + public BatchCertificateState? State { get; } + /// The time at which the Certificate entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. + public BatchCertificateState? PreviousState { get; } + /// The time at which the Certificate entered its previous state. This property is not set if the Certificate is in its initial Active state. + public DateTimeOffset? PreviousStateTransitionTime { get; } + /// The public part of the Certificate as a base-64 encoded .cer file. + public string PublicData { get; } + /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. 
+ public DeleteBatchCertificateError DeleteCertificateError { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs index 2c12fb0ec6d9..b2029c5fcd57 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs @@ -64,6 +64,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("exactVersion"u8); writer.WriteStringValue(ExactVersion); } + if (Optional.IsDefined(SharedGalleryImageId)) + { + writer.WritePropertyName("sharedGalleryImageId"u8); + writer.WriteStringValue(SharedGalleryImageId); + } + if (Optional.IsDefined(CommunityGalleryImageId)) + { + writer.WritePropertyName("communityGalleryImageId"u8); + writer.WriteStringValue(CommunityGalleryImageId); + } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -107,6 +117,8 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo string version = default; string virtualMachineImageId = default; string exactVersion = default; + string sharedGalleryImageId = default; + string communityGalleryImageId = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -141,6 +153,16 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo exactVersion = property.Value.GetString(); continue; } + if (property.NameEquals("sharedGalleryImageId"u8)) + { + sharedGalleryImageId = property.Value.GetString(); + continue; + } + if (property.NameEquals("communityGalleryImageId"u8)) + { + communityGalleryImageId = property.Value.GetString(); + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); @@ -154,6 +176,8 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo version, virtualMachineImageId, exactVersion, + sharedGalleryImageId, + communityGalleryImageId, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs index 6c5cb5fc883f..67fa3216c0de 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs @@ -59,10 +59,12 @@ public ImageReference() /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. - /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. 
This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. + /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. + /// The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. /// Keeps track of any properties unknown to the library. - internal ImageReference(string publisher, string offer, string sku, string version, string virtualMachineImageId, string exactVersion, IDictionary serializedAdditionalRawData) + internal ImageReference(string publisher, string offer, string sku, string version, string virtualMachineImageId, string exactVersion, string sharedGalleryImageId, string communityGalleryImageId, IDictionary serializedAdditionalRawData) { Publisher = publisher; Offer = offer; @@ -70,6 +72,8 @@ internal ImageReference(string publisher, string offer, string sku, string versi Version = version; VirtualMachineImageId = virtualMachineImageId; ExactVersion = exactVersion; + SharedGalleryImageId = sharedGalleryImageId; + CommunityGalleryImageId = communityGalleryImageId; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -81,9 +85,13 @@ internal ImageReference(string publisher, string offer, string sku, string versi public string Sku { get; set; } /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. public string Version { get; set; } - /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. 
This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. public string VirtualMachineImageId { get; set; } /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. public string ExactVersion { get; } + /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. + public string SharedGalleryImageId { get; set; } + /// The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. + public string CommunityGalleryImageId { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs index 7dabe3fd9da0..ec5da9c432cd 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs @@ -51,7 +51,7 @@ public partial class InboundNatPool /// Initializes a new instance of . /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. /// The protocol of the endpoint. - /// The port number on the Compute Node. 
This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// is null. @@ -70,7 +70,7 @@ public InboundNatPool(string name, InboundEndpointProtocol protocol, int backend /// Initializes a new instance of . /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. /// The protocol of the endpoint. - /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. 
If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. @@ -95,7 +95,7 @@ internal InboundNatPool() public string Name { get; set; } /// The protocol of the endpoint. public InboundEndpointProtocol Protocol { get; set; } - /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. public int BackendPort { get; set; } /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. public int FrontendPortRangeStart { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs index 628df1104012..245dd5b55136 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs @@ -34,8 +34,16 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(ManagedDisk)} does not support writing '{format}' format."); } - writer.WritePropertyName("storageAccountType"u8); - writer.WriteStringValue(StorageAccountType.ToString()); + if (Optional.IsDefined(StorageAccountType)) + { + writer.WritePropertyName("storageAccountType"u8); + writer.WriteStringValue(StorageAccountType.Value.ToString()); + } + if (Optional.IsDefined(SecurityProfile)) + { + writer.WritePropertyName("securityProfile"u8); + writer.WriteObjectValue(SecurityProfile, options); + } if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) @@ -73,23 +81,37 @@ internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelRea { return null; } - StorageAccountType storageAccountType = default; + StorageAccountType? 
storageAccountType = default; + VMDiskSecurityProfile securityProfile = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("storageAccountType"u8)) { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } storageAccountType = new StorageAccountType(property.Value.GetString()); continue; } + if (property.NameEquals("securityProfile"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + securityProfile = VMDiskSecurityProfile.DeserializeVMDiskSecurityProfile(property.Value, options); + continue; + } if (options.Format != "W") { rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); } } serializedAdditionalRawData = rawDataDictionary; - return new ManagedDisk(storageAccountType, serializedAdditionalRawData); + return new ManagedDisk(storageAccountType, securityProfile, serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs index f8ada07e8f72..32a2f0e6cc65 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs @@ -46,27 +46,24 @@ public partial class ManagedDisk private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The storage account type for managed disk. - public ManagedDisk(StorageAccountType storageAccountType) + public ManagedDisk() { - StorageAccountType = storageAccountType; } /// Initializes a new instance of . /// The storage account type for managed disk. + /// Specifies the security profile settings for the managed disk. /// Keeps track of any properties unknown to the library. - internal ManagedDisk(StorageAccountType storageAccountType, IDictionary serializedAdditionalRawData) + internal ManagedDisk(StorageAccountType? storageAccountType, VMDiskSecurityProfile securityProfile, IDictionary serializedAdditionalRawData) { StorageAccountType = storageAccountType; + SecurityProfile = securityProfile; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal ManagedDisk() - { - } - /// The storage account type for managed disk. - public StorageAccountType StorageAccountType { get; set; } + public StorageAccountType? StorageAccountType { get; set; } + /// Specifies the security profile settings for the managed disk. + public VMDiskSecurityProfile SecurityProfile { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs index e69a292a3501..90847b622aee 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs @@ -51,10 +51,10 @@ public NetworkConfiguration() } /// Initializes a new instance of . - /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. 
The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. /// The scope of dynamic vnet assignment. - /// The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. - /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. + /// The configuration for endpoints on Compute Nodes in the Batch Pool. + /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. 
/// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. /// Keeps track of any properties unknown to the library. internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynamicVNetAssignmentScope, BatchPoolEndpointConfiguration endpointConfiguration, PublicIpAddressConfiguration publicIpAddressConfiguration, bool? enableAcceleratedNetworking, IDictionary serializedAdditionalRawData) @@ -67,13 +67,13 @@ internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynam _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). 
If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. public string SubnetId { get; set; } /// The scope of dynamic vnet assignment. public DynamicVNetAssignmentScope? DynamicVNetAssignmentScope { get; set; } - /// The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. + /// The configuration for endpoints on Compute Nodes in the Batch Pool. public BatchPoolEndpointConfiguration EndpointConfiguration { get; set; } - /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. + /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. public PublicIpAddressConfiguration PublicIpAddressConfiguration { get; set; } /// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. public bool? EnableAcceleratedNetworking { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs index 44d0f78eef82..2271ff331361 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs @@ -60,7 +60,7 @@ public OutputFileBlobContainerDestination(string containerUrl) /// The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. - /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. 
+ /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. /// Keeps track of any properties unknown to the library. internal OutputFileBlobContainerDestination(string path, string containerUrl, BatchNodeIdentityReference identityReference, IList uploadHeaders, IDictionary serializedAdditionalRawData) { @@ -82,7 +82,7 @@ internal OutputFileBlobContainerDestination() public string ContainerUrl { get; set; } /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. public BatchNodeIdentityReference IdentityReference { get; set; } - /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. + /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. public IList UploadHeaders { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs new file mode 100644 index 000000000000..2ab799148d35 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityEncryptionTypes.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// SecurityEncryptionTypes enums. + public readonly partial struct SecurityEncryptionTypes : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public SecurityEncryptionTypes(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NonPersistedTPMValue = "NonPersistedTPM"; + private const string VMGuestStateOnlyValue = "VMGuestStateOnly"; + + /// NonPersistedTPM. + public static SecurityEncryptionTypes NonPersistedTPM { get; } = new SecurityEncryptionTypes(NonPersistedTPMValue); + /// VMGuestStateOnly. + public static SecurityEncryptionTypes VMGuestStateOnly { get; } = new SecurityEncryptionTypes(VMGuestStateOnlyValue); + /// Determines if two values are the same. + public static bool operator ==(SecurityEncryptionTypes left, SecurityEncryptionTypes right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(SecurityEncryptionTypes left, SecurityEncryptionTypes right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator SecurityEncryptionTypes(string value) => new SecurityEncryptionTypes(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is SecurityEncryptionTypes other && Equals(other); + /// + public bool Equals(SecurityEncryptionTypes other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs index 4f81495d150a..d34a776e1368 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs @@ -46,7 +46,7 @@ public partial class SecurityProfile private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. + /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. /// is null. @@ -60,7 +60,7 @@ public SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSe } /// Initializes a new instance of . - /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. + /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. /// Keeps track of any properties unknown to the library. @@ -77,7 +77,7 @@ internal SecurityProfile() { } - /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. 
+ /// This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. For more information on encryption at host requirements, please refer to https://learn.microsoft.com/azure/virtual-machines/disk-encryption#supported-vm-sizes. public bool EncryptionAtHost { get; set; } /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. public SecurityTypes SecurityType { get; set; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs index 699dde2ed57c..bd2f2b9cd274 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs @@ -23,9 +23,12 @@ public SecurityTypes(string value) } private const string TrustedLaunchValue = "trustedLaunch"; + private const string ConfidentialVMValue = "confidentialVM"; /// Trusted launch protects against advanced and persistent attack techniques. public static SecurityTypes TrustedLaunch { get; } = new SecurityTypes(TrustedLaunchValue); + /// Azure confidential computing offers confidential VMs for tenants with high security and confidentiality requirements. These VMs provide a strong, hardware-enforced boundary to help meet your security needs. You can use confidential VMs for migrations without making changes to your code, with the platform protecting your VM's state from being read or modified. + public static SecurityTypes ConfidentialVM { get; } = new SecurityTypes(ConfidentialVMValue); /// Determines if two values are the same. public static bool operator ==(SecurityTypes left, SecurityTypes right) => left.Equals(right); /// Determines if two values are not the same. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs index ff0839122bf2..cb00df229578 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs @@ -55,7 +55,7 @@ public UpgradePolicy(UpgradeMode mode) /// Initializes a new instance of . /// Specifies the mode of an upgrade to virtual machines in the scale set.<br /><br /> Possible values are:<br /><br /> **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.<br /><br /> **Automatic** - All virtual machines in the scale set are automatically updated at the same time.<br /><br /> **Rolling** - Scale set performs updates in batches with an optional pause time in between. /// Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. - /// The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. + /// The configuration parameters used while performing a rolling upgrade. /// Keeps track of any properties unknown to the library.
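A minimal sketch of requesting confidential VM nodes with the new SecurityTypes.ConfidentialVM value, assuming UefiSettings exposes a parameterless constructor (its shape is not shown in this diff):

var securityProfile = new SecurityProfile(
    encryptionAtHost: true,            // encrypts all disks, including the resource/temp disk, at the host
    SecurityTypes.ConfidentialVM,      // value added in this change
    new UefiSettings());               // secure boot / vTPM settings; construction is assumed here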
internal UpgradePolicy(UpgradeMode mode, AutomaticOsUpgradePolicy automaticOsUpgradePolicy, RollingUpgradePolicy rollingUpgradePolicy, IDictionary serializedAdditionalRawData) { @@ -74,7 +74,7 @@ internal UpgradePolicy() public UpgradeMode Mode { get; set; } /// Configuration parameters used for performing automatic OS Upgrade. The configuration parameters used for performing automatic OS upgrade. public AutomaticOsUpgradePolicy AutomaticOsUpgradePolicy { get; set; } - /// The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. + /// The configuration parameters used while performing a rolling upgrade. public RollingUpgradePolicy RollingUpgradePolicy { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs new file mode 100644 index 000000000000..816cec43984a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class VMDiskSecurityProfile : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + writer.WriteStartObject(); + JsonModelWriteCore(writer, options); + writer.WriteEndObject(); + } + + /// The JSON writer. + /// The client options for reading and writing models. + protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support writing '{format}' format."); + } + + if (Optional.IsDefined(SecurityEncryptionType)) + { + writer.WritePropertyName("securityEncryptionType"u8); + writer.WriteStringValue(SecurityEncryptionType.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + } + + VMDiskSecurityProfile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeVMDiskSecurityProfile(document.RootElement, options); + } + + internal static VMDiskSecurityProfile DeserializeVMDiskSecurityProfile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + SecurityEncryptionTypes? securityEncryptionType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("securityEncryptionType"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + securityEncryptionType = new SecurityEncryptionTypes(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new VMDiskSecurityProfile(securityEncryptionType, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support writing '{options.Format}' format."); + } + } + + VMDiskSecurityProfile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeVMDiskSecurityProfile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static VMDiskSecurityProfile FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); + return DeserializeVMDiskSecurityProfile(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs new file mode 100644 index 000000000000..2405e8506ee0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
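A minimal sketch of how the new model round-trips through System.ClientModel, assuming the standard ModelReaderWriter.Write/Read helpers used by the other generated models:

VMDiskSecurityProfile profile = new VMDiskSecurityProfile
{
    SecurityEncryptionType = SecurityEncryptionTypes.NonPersistedTPM,
};
BinaryData json = ModelReaderWriter.Write(profile);    // roughly {"securityEncryptionType":"NonPersistedTPM"}
VMDiskSecurityProfile roundTripped = ModelReaderWriter.Read<VMDiskSecurityProfile>(json);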
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. + public partial class VMDiskSecurityProfile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public VMDiskSecurityProfile() + { + } + + /// Initializes a new instance of . + /// Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + /// Keeps track of any properties unknown to the library. + internal VMDiskSecurityProfile(SecurityEncryptionTypes? securityEncryptionType, IDictionary serializedAdditionalRawData) + { + SecurityEncryptionType = securityEncryptionType; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. + public SecurityEncryptionTypes? SecurityEncryptionType { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs index 8923d6045e32..ea1946689c31 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs @@ -67,7 +67,7 @@ public VirtualMachineConfiguration(ImageReference imageReference, string nodeAge /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. /// Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. 
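A minimal sketch of the extensible-enum behaviour defined for SecurityEncryptionTypes above: string values convert implicitly and equality ignores case, so unrecognized service values still round-trip:

SecurityEncryptionTypes fromService = "vmgueststateonly";                  // implicit conversion from string
bool matches = fromService == SecurityEncryptionTypes.VMGuestStateOnly;    // true: comparison is case-insensitive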
- /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. /// /// This only applies to Images that contain the Windows operating system, and /// should only be used when you hold valid on-premises licenses for the Compute @@ -115,7 +115,7 @@ internal VirtualMachineConfiguration() public string NodeAgentSkuId { get; set; } /// Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. public WindowsConfiguration WindowsConfiguration { get; set; } - /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. 
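A hypothetical sketch of attaching an empty data disk per the dataDisks note above; the ImageReference initializer and the DataDisk constructor shape (LUN, size in GB) are assumptions about the surrounding API, and the disk still has to be partitioned and formatted from the node, for example in a start task:

var vmConfiguration = new VirtualMachineConfiguration(
    new ImageReference { Publisher = "canonical", Offer = "0001-com-ubuntu-server-jammy", Sku = "22_04-lts" }, // assumed initializer
    "batch.node.ubuntu 22.04");
vmConfiguration.DataDisks.Add(new DataDisk(0, 128)); // assumed (logicalUnitNumber, diskSizeGb)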
The disk is not formatted after being attached, it must be formatted before use - for more information see https://learn.microsoft.com/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://learn.microsoft.com/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. public IList DataDisks { get; } /// /// This only applies to Images that contain the Windows operating system, and diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs index 1f3173fcb231..c096f45f3294 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs @@ -51,7 +51,7 @@ public WindowsUserConfiguration() } /// Initializes a new instance of . - /// The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. + /// The login mode for the user. The default is 'batch'. /// Keeps track of any properties unknown to the library. internal WindowsUserConfiguration(LoginMode? loginMode, IDictionary serializedAdditionalRawData) { @@ -59,7 +59,7 @@ internal WindowsUserConfiguration(LoginMode? loginMode, IDictionary The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. + /// The login mode for the user. The default is 'batch'. public LoginMode? LoginMode { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs index 274be47c20ed..10675b3f91bb 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs @@ -20,7 +20,7 @@ public partial class Samples_BatchClient { [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetApplication_GetApplications() + public void Example_BatchClient_GetApplication_GetApplications() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -36,7 +36,7 @@ public void Example_Batch_GetApplication_GetApplications() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetApplication_GetApplications_Async() + public async Task Example_BatchClient_GetApplication_GetApplications_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -52,7 +52,7 @@ public async Task Example_Batch_GetApplication_GetApplications_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetApplication_GetApplications_Convenience() + public void Example_BatchClient_GetApplication_GetApplications_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -63,7 +63,7 @@ public void Example_Batch_GetApplication_GetApplications_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetApplication_GetApplications_Convenience_Async() + public async Task Example_BatchClient_GetApplication_GetApplications_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -74,7 +74,7 @@ public async Task Example_Batch_GetApplication_GetApplications_Convenience_Async [Test] [Ignore("Only validating compilation of 
examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking() + public void Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -108,7 +108,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -142,7 +142,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking_Convenience() + public void Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -168,7 +168,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking_Conve [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -194,7 +194,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithAcceleratedNetworking [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified() + public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -275,7 +275,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -356,7 +356,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_A [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_Convenience() + public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -407,7 +407,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_Conveni [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -458,7 +458,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithMountDriveSpecified_C [Test] [Ignore("Only validating compilation of examples")] - public void 
Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -504,7 +504,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -550,7 +550,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Convenience() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -573,7 +573,10 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith }, Caching = CachingType.ReadWrite, DiskSizeGB = 100, - ManagedDisk = new ManagedDisk(StorageAccountType.StandardSSDLRS), + ManagedDisk = new ManagedDisk + { + StorageAccountType = StorageAccountType.StandardSSDLRS, + }, }, }, ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -587,7 +590,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithOSDisk_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -610,7 +613,10 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo }, Caching = CachingType.ReadWrite, DiskSizeGB = 100, - ManagedDisk = new ManagedDisk(StorageAccountType.StandardSSDLRS), + ManagedDisk = new ManagedDisk + { + StorageAccountType = StorageAccountType.StandardSSDLRS, + }, }, }, ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -624,7 +630,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags() + public void Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -659,7 +665,7 @@ public void Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_Async() + public async Task Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -694,7 +700,7 @@ public async Task Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_As [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_Convenience() + public void 
Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -721,7 +727,7 @@ public void Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_Convenie [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -748,7 +754,7 @@ public async Task Example_Batch_CreatePool_CreatesASimplePoolWithResourceTags_Co [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile() + public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -787,7 +793,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -826,7 +832,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Async [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Convenience() + public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -854,7 +860,7 @@ public void Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Convenience [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -882,7 +888,7 @@ public async Task Example_Batch_CreatePool_CreatesAPoolWithSecurityProfile_Conve [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPool() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -928,7 +934,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPool_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -974,7 +980,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool_Convenience() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPool_Convenience() { Uri endpoint = new Uri(""); 
TokenCredential credential = new DefaultAzureCredential(); @@ -1002,7 +1008,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool_Con [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPool_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPool_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1030,7 +1036,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1075,7 +1081,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1120,7 +1126,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Convenience() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1152,7 +1158,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithContainers_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1184,7 +1190,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1248,7 +1254,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1312,7 +1318,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void 
Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Convenience() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1354,7 +1360,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithExtensions_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1396,7 +1402,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1434,7 +1440,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1472,7 +1478,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience() + public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1501,7 +1507,7 @@ public void Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWith [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience_Async() + public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1530,7 +1536,7 @@ public async Task Example_Batch_CreatePool_CreatesAVirtualMachineConfigurationPo [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeletePool_PoolDelete() + public void Example_BatchClient_DeletePool_PoolDelete() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1543,7 +1549,7 @@ public void Example_Batch_DeletePool_PoolDelete() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeletePool_PoolDelete_Async() + public async Task Example_BatchClient_DeletePool_PoolDelete_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1556,7 +1562,7 @@ public async Task 
Example_Batch_DeletePool_PoolDelete_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking() + public void Example_BatchClient_GetPool_GetAPoolWithAcceleratedNetworking() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1570,7 +1576,7 @@ public void Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Async() + public async Task Example_BatchClient_GetPool_GetAPoolWithAcceleratedNetworking_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1584,7 +1590,7 @@ public async Task Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Convenience() + public void Example_BatchClient_GetPool_GetAPoolWithAcceleratedNetworking_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1595,7 +1601,7 @@ public void Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Convenience( [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Convenience_Async() + public async Task Example_BatchClient_GetPool_GetAPoolWithAcceleratedNetworking_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1606,7 +1612,7 @@ public async Task Example_Batch_GetPool_GetAPoolWithAcceleratedNetworking_Conven [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_PoolGet() + public void Example_BatchClient_GetPool_PoolGet() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1620,7 +1626,7 @@ public void Example_Batch_GetPool_PoolGet() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_PoolGet_Async() + public async Task Example_BatchClient_GetPool_PoolGet_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1634,7 +1640,7 @@ public async Task Example_Batch_GetPool_PoolGet_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_PoolGet_Convenience() + public void Example_BatchClient_GetPool_PoolGet_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1645,7 +1651,7 @@ public void Example_Batch_GetPool_PoolGet_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_PoolGet_Convenience_Async() + public async Task Example_BatchClient_GetPool_PoolGet_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1656,7 +1662,7 @@ public async Task Example_Batch_GetPool_PoolGet_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1670,7 +1676,7 @@ public void 
Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurit [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1684,7 +1690,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithS [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Convenience() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1695,7 +1701,7 @@ public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurit [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Convenience_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithSecurityProfile_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1706,7 +1712,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithS [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1720,7 +1726,7 @@ public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensi [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1734,7 +1740,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithE [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Convenience() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1745,7 +1751,7 @@ public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensi [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Convenience_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithExtensions_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1756,7 +1762,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithE [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk() + public void Example_BatchClient_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk() 
{ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1770,7 +1776,7 @@ public void Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk( [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Async() + public async Task Example_BatchClient_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1784,7 +1790,7 @@ public async Task Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithO [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Convenience() + public void Example_BatchClient_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1795,7 +1801,7 @@ public void Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_ [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Convenience_Async() + public async Task Example_BatchClient_GetPool_AddAVirtualMachineConfigurationPoolWithOSDisk_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1806,7 +1812,7 @@ public async Task Example_Batch_GetPool_AddAVirtualMachineConfigurationPoolWithO [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1820,7 +1826,7 @@ public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithService [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1834,7 +1840,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithS [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience() + public void Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1845,7 +1851,7 @@ public void Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithService [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience_Async() + public async Task Example_BatchClient_GetPool_GetAVirtualMachineConfigurationPoolWithServiceArtifactReference_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1856,7 +1862,7 @@ public async Task Example_Batch_GetPool_GetAVirtualMachineConfigurationPoolWithS [Test] [Ignore("Only validating compilation of 
examples")] - public void Example_Batch_UpdatePool_PatchThePool() + public void Example_BatchClient_UpdatePool_PatchThePool() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1876,7 +1882,7 @@ public void Example_Batch_UpdatePool_PatchThePool() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_UpdatePool_PatchThePool_Async() + public async Task Example_BatchClient_UpdatePool_PatchThePool_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1896,7 +1902,7 @@ public async Task Example_Batch_UpdatePool_PatchThePool_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisablePoolAutoScale_DisablePoolAutoscale() + public void Example_BatchClient_DisablePoolAutoScale_DisablePoolAutoscale() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1909,7 +1915,7 @@ public void Example_Batch_DisablePoolAutoScale_DisablePoolAutoscale() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisablePoolAutoScale_DisablePoolAutoscale_Async() + public async Task Example_BatchClient_DisablePoolAutoScale_DisablePoolAutoscale_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1922,7 +1928,7 @@ public async Task Example_Batch_DisablePoolAutoScale_DisablePoolAutoscale_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale() + public void Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1940,7 +1946,7 @@ public void Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Async() + public async Task Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1958,7 +1964,7 @@ public async Task Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Convenience() + public void Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1974,7 +1980,7 @@ public void Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Convenience_Async() + public async Task Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -1990,7 +1996,7 @@ public async Task Example_Batch_EnablePoolAutoScale_PoolEnableAutoscale_Convenie [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale() + public void Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscale() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2008,7 +2014,7 @@ public void Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale() [Test] 
[Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Async() + public async Task Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2026,7 +2032,7 @@ public async Task Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Asyn [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Convenience() + public void Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2038,7 +2044,7 @@ public void Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Convenienc [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Convenience_Async() + public async Task Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2050,7 +2056,7 @@ public async Task Example_Batch_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Conv [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ResizePool_PoolResize() + public void Example_BatchClient_ResizePool_PoolResize() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2068,7 +2074,7 @@ public void Example_Batch_ResizePool_PoolResize() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ResizePool_PoolResize_Async() + public async Task Example_BatchClient_ResizePool_PoolResize_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2086,7 +2092,7 @@ public async Task Example_Batch_ResizePool_PoolResize_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ResizePool_PoolResize_Convenience() + public void Example_BatchClient_ResizePool_PoolResize_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2102,7 +2108,7 @@ public void Example_Batch_ResizePool_PoolResize_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ResizePool_PoolResize_Convenience_Async() + public async Task Example_BatchClient_ResizePool_PoolResize_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2118,7 +2124,7 @@ public async Task Example_Batch_ResizePool_PoolResize_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_StopPoolResize_PoolStopResize() + public void Example_BatchClient_StopPoolResize_PoolStopResize() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2131,7 +2137,7 @@ public void Example_Batch_StopPoolResize_PoolStopResize() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_StopPoolResize_PoolStopResize_Async() + public async Task Example_BatchClient_StopPoolResize_PoolStopResize_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2144,7 +2150,7 @@ public async Task Example_Batch_StopPoolResize_PoolStopResize_Async() [Test] [Ignore("Only validating 
compilation of examples")] - public void Example_Batch_ReplacePoolProperties_PoolUpdate() + public void Example_BatchClient_ReplacePoolProperties_PoolUpdate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2156,6 +2162,7 @@ public void Example_Batch_ReplacePoolProperties_PoolUpdate() { commandLine = "/bin/bash -c 'echo start task'", }, + certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -2166,7 +2173,7 @@ public void Example_Batch_ReplacePoolProperties_PoolUpdate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplacePoolProperties_PoolUpdate_Async() + public async Task Example_BatchClient_ReplacePoolProperties_PoolUpdate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2178,6 +2185,7 @@ public async Task Example_Batch_ReplacePoolProperties_PoolUpdate_Async() { commandLine = "/bin/bash -c 'echo start task'", }, + certificateReferences = Array.Empty(), applicationPackageReferences = Array.Empty(), metadata = Array.Empty(), }); @@ -2188,13 +2196,13 @@ public async Task Example_Batch_ReplacePoolProperties_PoolUpdate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplacePoolProperties_PoolUpdate_Convenience() + public void Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty()) + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2203,13 +2211,13 @@ public void Example_Batch_ReplacePoolProperties_PoolUpdate_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplacePoolProperties_PoolUpdate_Convenience_Async() + public async Task Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty()) + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2218,7 +2226,7 @@ public async Task Example_Batch_ReplacePoolProperties_PoolUpdate_Convenience_Asy [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_RemoveNodes_PoolRemoveNodes() + public void Example_BatchClient_RemoveNodes_PoolRemoveNodes() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2239,7 +2247,7 @@ public void Example_Batch_RemoveNodes_PoolRemoveNodes() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_RemoveNodes_PoolRemoveNodes_Async() + public async Task Example_BatchClient_RemoveNodes_PoolRemoveNodes_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2260,7 +2268,7 @@ public async Task Example_Batch_RemoveNodes_PoolRemoveNodes_Async() [Test] [Ignore("Only validating compilation of examples")] - public void 
Example_Batch_RemoveNodes_PoolRemoveNodes_Convenience() + public void Example_BatchClient_RemoveNodes_PoolRemoveNodes_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2272,7 +2280,7 @@ public void Example_Batch_RemoveNodes_PoolRemoveNodes_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_RemoveNodes_PoolRemoveNodes_Convenience_Async() + public async Task Example_BatchClient_RemoveNodes_PoolRemoveNodes_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2284,7 +2292,7 @@ public async Task Example_Batch_RemoveNodes_PoolRemoveNodes_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteJob_DeleteJob() + public void Example_BatchClient_DeleteJob_DeleteJob() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2297,7 +2305,7 @@ public void Example_Batch_DeleteJob_DeleteJob() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteJob_DeleteJob_Async() + public async Task Example_BatchClient_DeleteJob_DeleteJob_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2310,7 +2318,7 @@ public async Task Example_Batch_DeleteJob_DeleteJob_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJob_JobGet() + public void Example_BatchClient_GetJob_JobGet() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2324,7 +2332,7 @@ public void Example_Batch_GetJob_JobGet() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJob_JobGet_Async() + public async Task Example_BatchClient_GetJob_JobGet_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2338,7 +2346,7 @@ public async Task Example_Batch_GetJob_JobGet_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJob_JobGet_Convenience() + public void Example_BatchClient_GetJob_JobGet_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2349,7 +2357,7 @@ public void Example_Batch_GetJob_JobGet_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJob_JobGet_Convenience_Async() + public async Task Example_BatchClient_GetJob_JobGet_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2360,7 +2368,7 @@ public async Task Example_Batch_GetJob_JobGet_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_UpdateJob_JobPatch() + public void Example_BatchClient_UpdateJob_JobUpdate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2386,7 +2394,7 @@ public void Example_Batch_UpdateJob_JobPatch() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_UpdateJob_JobPatch_Async() + public async Task Example_BatchClient_UpdateJob_JobUpdate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2412,7 +2420,7 @@ public async Task Example_Batch_UpdateJob_JobPatch_Async() [Test] [Ignore("Only validating compilation of examples")] - public void 
Example_Batch_ReplaceJob_JobUpdate() + public void Example_BatchClient_ReplaceJob_JobPatch() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2438,7 +2446,7 @@ public void Example_Batch_ReplaceJob_JobUpdate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceJob_JobUpdate_Async() + public async Task Example_BatchClient_ReplaceJob_JobPatch_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2464,7 +2472,7 @@ public async Task Example_Batch_ReplaceJob_JobUpdate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceJob_JobUpdate_Convenience() + public void Example_BatchClient_ReplaceJob_JobPatch_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2487,7 +2495,7 @@ public void Example_Batch_ReplaceJob_JobUpdate_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceJob_JobUpdate_Convenience_Async() + public async Task Example_BatchClient_ReplaceJob_JobPatch_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2510,7 +2518,7 @@ public async Task Example_Batch_ReplaceJob_JobUpdate_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisableJob_JobDisable() + public void Example_BatchClient_DisableJob_JobDisable() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2527,7 +2535,7 @@ public void Example_Batch_DisableJob_JobDisable() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisableJob_JobDisable_Async() + public async Task Example_BatchClient_DisableJob_JobDisable_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2544,7 +2552,7 @@ public async Task Example_Batch_DisableJob_JobDisable_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisableJob_JobDisable_Convenience() + public void Example_BatchClient_DisableJob_JobDisable_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2556,7 +2564,7 @@ public void Example_Batch_DisableJob_JobDisable_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisableJob_JobDisable_Convenience_Async() + public async Task Example_BatchClient_DisableJob_JobDisable_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2568,7 +2576,7 @@ public async Task Example_Batch_DisableJob_JobDisable_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EnableJob_JobEnable() + public void Example_BatchClient_EnableJob_JobEnable() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2581,7 +2589,7 @@ public void Example_Batch_EnableJob_JobEnable() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EnableJob_JobEnable_Async() + public async Task Example_BatchClient_EnableJob_JobEnable_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2594,7 +2602,7 @@ public async Task Example_Batch_EnableJob_JobEnable_Async() [Test] [Ignore("Only validating 
compilation of examples")] - public void Example_Batch_TerminateJob_JobTerminate() + public void Example_BatchClient_TerminateJob_JobTerminate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2608,7 +2616,7 @@ public void Example_Batch_TerminateJob_JobTerminate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_TerminateJob_JobTerminate_Async() + public async Task Example_BatchClient_TerminateJob_JobTerminate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2622,7 +2630,7 @@ public async Task Example_Batch_TerminateJob_JobTerminate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_TerminateJob_JobTerminate_Convenience() + public void Example_BatchClient_TerminateJob_JobTerminate_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2633,7 +2641,7 @@ public void Example_Batch_TerminateJob_JobTerminate_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_TerminateJob_JobTerminate_Convenience_Async() + public async Task Example_BatchClient_TerminateJob_JobTerminate_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2644,7 +2652,7 @@ public async Task Example_Batch_TerminateJob_JobTerminate_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJob_CreatesABasicJob() + public void Example_BatchClient_CreateJob_CreatesABasicJob() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2666,7 +2674,7 @@ public void Example_Batch_CreateJob_CreatesABasicJob() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJob_CreatesABasicJob_Async() + public async Task Example_BatchClient_CreateJob_CreatesABasicJob_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2688,7 +2696,7 @@ public async Task Example_Batch_CreateJob_CreatesABasicJob_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJob_CreatesABasicJob_Convenience() + public void Example_BatchClient_CreateJob_CreatesABasicJob_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2706,7 +2714,7 @@ public void Example_Batch_CreateJob_CreatesABasicJob_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJob_CreatesABasicJob_Convenience_Async() + public async Task Example_BatchClient_CreateJob_CreatesABasicJob_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2724,7 +2732,7 @@ public async Task Example_Batch_CreateJob_CreatesABasicJob_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJob_CreatesAComplexJob() + public void Example_BatchClient_CreateJob_CreatesAComplexJob() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2790,7 +2798,7 @@ public void Example_Batch_CreateJob_CreatesAComplexJob() poolLifetimeOption = "job", pool = new { - vmSize = "Standard_D1_v2", + vmSize = "STANDARD_D2S_V3", virtualMachineConfiguration = new { imageReference = new @@ -2801,6 +2809,14 @@ public void 
Example_Batch_CreateJob_CreatesAComplexJob() version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", + windowsConfiguration = new + { + enableAutomaticUpdates = false, + }, + nodePlacementConfiguration = new + { + policy = "zonal", + }, }, resizeTimeout = "PT15M", targetDedicatedNodes = 3, @@ -2842,6 +2858,20 @@ public void Example_Batch_CreateJob_CreatesAComplexJob() maxTaskRetryCount = 2, waitForSuccess = true, }, + certificateReferences = new object[] + { +new +{ +thumbprint = "0123456789abcdef0123456789abcdef01234567", +thumbprintAlgorithm = "sha1", +storeLocation = "localmachine", +storeName = "Root", +visibility = new object[] +{ +"task" +}, +} + }, metadata = new object[] { new @@ -2870,7 +2900,7 @@ public void Example_Batch_CreateJob_CreatesAComplexJob() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJob_CreatesAComplexJob_Async() + public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -2936,7 +2966,7 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Async() poolLifetimeOption = "job", pool = new { - vmSize = "Standard_D1_v2", + vmSize = "STANDARD_D2S_V3", virtualMachineConfiguration = new { imageReference = new @@ -2947,6 +2977,14 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Async() version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", + windowsConfiguration = new + { + enableAutomaticUpdates = false, + }, + nodePlacementConfiguration = new + { + policy = "zonal", + }, }, resizeTimeout = "PT15M", targetDedicatedNodes = 3, @@ -2988,6 +3026,20 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Async() maxTaskRetryCount = 2, waitForSuccess = true, }, + certificateReferences = new object[] + { +new +{ +thumbprint = "0123456789abcdef0123456789abcdef01234567", +thumbprintAlgorithm = "sha1", +storeLocation = "localmachine", +storeName = "Root", +visibility = new object[] +{ +"task" +}, +} + }, metadata = new object[] { new @@ -3016,7 +3068,7 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJob_CreatesAComplexJob_Convenience() + public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3027,7 +3079,7 @@ public void Example_Batch_CreateJob_CreatesAComplexJob_Convenience() AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("Standard_D1_v2") + Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference { @@ -3035,7 +3087,17 @@ public void Example_Batch_CreateJob_CreatesAComplexJob_Convenience() Offer = "WindowsServer", Sku = "2016-datacenter-smalldisk", Version = "latest", - }, "batch.node.windows amd64"), + }, "batch.node.windows amd64") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = false, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Zonal, + }, + }, TaskSlotsPerNode = 2, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -3065,6 +3127,12 @@ public void 
Example_Batch_CreateJob_CreatesAComplexJob_Convenience() MaxTaskRetryCount = 2, WaitForSuccess = true, }, + CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") +{ +StoreLocation = BatchCertificateStoreLocation.LocalMachine, +StoreName = "Root", +Visibility = {BatchCertificateVisibility.Task}, +}}, Metadata = { new MetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, @@ -3117,7 +3185,7 @@ public void Example_Batch_CreateJob_CreatesAComplexJob_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJob_CreatesAComplexJob_Convenience_Async() + public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3128,7 +3196,7 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Convenience_Async() AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("Standard_D1_v2") + Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference { @@ -3136,7 +3204,17 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Convenience_Async() Offer = "WindowsServer", Sku = "2016-datacenter-smalldisk", Version = "latest", - }, "batch.node.windows amd64"), + }, "batch.node.windows amd64") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = false, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Zonal, + }, + }, TaskSlotsPerNode = 2, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -3166,6 +3244,12 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Convenience_Async() MaxTaskRetryCount = 2, WaitForSuccess = true, }, + CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") +{ +StoreLocation = BatchCertificateStoreLocation.LocalMachine, +StoreName = "Root", +Visibility = {BatchCertificateVisibility.Task}, +}}, Metadata = { new MetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, @@ -3218,7 +3302,7 @@ public async Task Example_Batch_CreateJob_CreatesAComplexJob_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobTaskCounts_JobGetTaskCounts() + public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3241,7 +3325,7 @@ public void Example_Batch_GetJobTaskCounts_JobGetTaskCounts() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Async() + public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3264,7 +3348,7 @@ public async Task Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Convenience() + public void Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience() { Uri 
endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3275,7 +3359,7 @@ public void Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Convenience_Async() + public async Task Example_BatchClient_GetJobTaskCounts_JobGetTaskCounts_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3286,7 +3370,185 @@ public async Task Example_Batch_GetJobTaskCounts_JobGetTaskCounts_Convenience_As [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteJobSchedule_JobScheduleDelete() + public void Example_BatchClient_CreateCertificate_CertificateCreate() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + thumbprintAlgorithm = "sha1", + thumbprint = "0123456789abcdef0123456789abcdef01234567", + data = "#####...", + certificateFormat = "pfx", + password = "", + }); + Response response = client.CreateCertificate(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateCertificate_CertificateCreate_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + thumbprintAlgorithm = "sha1", + thumbprint = "0123456789abcdef0123456789abcdef01234567", + data = "#####...", + certificateFormat = "pfx", + password = "", + }); + Response response = await client.CreateCertificateAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreateCertificate_CertificateCreate_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") + { + CertificateFormat = BatchCertificateFormat.Pfx, + Password = "", + }; + Response response = client.CreateCertificate(certificate); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateCertificate_CertificateCreate_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") + { + CertificateFormat = BatchCertificateFormat.Pfx, + Password = "", + }; + Response response = await client.CreateCertificateAsync(certificate); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CancelCertificateDeletion_CertificateCancelDelete() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.CancelCertificateDeletion("sha1", "0123456789abcdef0123456789abcdef01234567"); + + 
Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CancelCertificateDeletion_CertificateCancelDelete_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.CancelCertificateDeletionAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_DeleteCertificate_CertificateDelete() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_DeleteCertificate_CertificateDelete_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetCertificate_CertificateGet() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetCertificate_CertificateGet_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567", null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetCertificate_CertificateGet_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetCertificate_CertificateGet_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response 
response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_DeleteJobSchedule_JobScheduleDelete() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3299,7 +3561,7 @@ public void Example_Batch_DeleteJobSchedule_JobScheduleDelete() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteJobSchedule_JobScheduleDelete_Async() + public async Task Example_BatchClient_DeleteJobSchedule_JobScheduleDelete_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3312,7 +3574,7 @@ public async Task Example_Batch_DeleteJobSchedule_JobScheduleDelete_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobSchedule_JobScheduleGet() + public void Example_BatchClient_GetJobSchedule_JobScheduleGet() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3326,7 +3588,7 @@ public void Example_Batch_GetJobSchedule_JobScheduleGet() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobSchedule_JobScheduleGet_Async() + public async Task Example_BatchClient_GetJobSchedule_JobScheduleGet_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3340,7 +3602,7 @@ public async Task Example_Batch_GetJobSchedule_JobScheduleGet_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobSchedule_JobScheduleGet_Convenience() + public void Example_BatchClient_GetJobSchedule_JobScheduleGet_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3351,7 +3613,7 @@ public void Example_Batch_GetJobSchedule_JobScheduleGet_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobSchedule_JobScheduleGet_Convenience_Async() + public async Task Example_BatchClient_GetJobSchedule_JobScheduleGet_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3362,7 +3624,7 @@ public async Task Example_Batch_GetJobSchedule_JobScheduleGet_Convenience_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_UpdateJobSchedule_JobSchedulePatch() + public void Example_BatchClient_UpdateJobSchedule_JobScheduleUpdate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3396,7 +3658,7 @@ public void Example_Batch_UpdateJobSchedule_JobSchedulePatch() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_UpdateJobSchedule_JobSchedulePatch_Async() + public async Task Example_BatchClient_UpdateJobSchedule_JobScheduleUpdate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3430,7 +3692,7 @@ public async Task Example_Batch_UpdateJobSchedule_JobSchedulePatch_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceJobSchedule_JobScheduleUpdate() + public void Example_BatchClient_ReplaceJobSchedule_JobSchedulePatch() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3464,7 +3726,7 @@ public void Example_Batch_ReplaceJobSchedule_JobScheduleUpdate() [Test] [Ignore("Only 
validating compilation of examples")] - public async Task Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Async() + public async Task Example_BatchClient_ReplaceJobSchedule_JobSchedulePatch_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3498,7 +3760,7 @@ public async Task Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Convenience() + public void Example_BatchClient_ReplaceJobSchedule_JobSchedulePatch_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3528,7 +3790,7 @@ public void Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Convenience_Async() + public async Task Example_BatchClient_ReplaceJobSchedule_JobSchedulePatch_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3558,7 +3820,7 @@ public async Task Example_Batch_ReplaceJobSchedule_JobScheduleUpdate_Convenience [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisableJobSchedule_JobScheduleDisable() + public void Example_BatchClient_DisableJobSchedule_JobScheduleDisable() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3571,7 +3833,7 @@ public void Example_Batch_DisableJobSchedule_JobScheduleDisable() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisableJobSchedule_JobScheduleDisable_Async() + public async Task Example_BatchClient_DisableJobSchedule_JobScheduleDisable_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3584,7 +3846,7 @@ public async Task Example_Batch_DisableJobSchedule_JobScheduleDisable_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EnableJobSchedule_JobScheduleEnable() + public void Example_BatchClient_EnableJobSchedule_JobScheduleEnable() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3597,7 +3859,7 @@ public void Example_Batch_EnableJobSchedule_JobScheduleEnable() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EnableJobSchedule_JobScheduleEnable_Async() + public async Task Example_BatchClient_EnableJobSchedule_JobScheduleEnable_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3610,7 +3872,7 @@ public async Task Example_Batch_EnableJobSchedule_JobScheduleEnable_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_TerminateJobSchedule_JobScheduleTerminate() + public void Example_BatchClient_TerminateJobSchedule_JobScheduleTerminate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3623,7 +3885,7 @@ public void Example_Batch_TerminateJobSchedule_JobScheduleTerminate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_TerminateJobSchedule_JobScheduleTerminate_Async() + public async Task Example_BatchClient_TerminateJobSchedule_JobScheduleTerminate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3636,7 +3898,7 @@ public 
async Task Example_Batch_TerminateJobSchedule_JobScheduleTerminate_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule() + public void Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3664,7 +3926,7 @@ public void Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Async() + public async Task Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3692,7 +3954,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Async [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Convenience() + public void Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3710,7 +3972,7 @@ public void Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Convenience [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Convenience_Async() + public async Task Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3728,7 +3990,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesABasicJobSchedule_Conve [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd() + public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -3803,7 +4065,7 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd() poolLifetimeOption = "jobschedule", pool = new { - vmSize = "Standard_D1_v2", + vmSize = "STANDARD_D2S_V3", virtualMachineConfiguration = new { imageReference = new @@ -3814,6 +4076,14 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd() version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", + windowsConfiguration = new + { + enableAutomaticUpdates = false, + }, + nodePlacementConfiguration = new + { + policy = "zonal", + }, }, resizeTimeout = "PT15M", targetDedicatedNodes = 3, @@ -3855,6 +4125,20 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd() maxTaskRetryCount = 2, waitForSuccess = true, }, + certificateReferences = new object[] + { +new +{ +thumbprint = "0123456789abcdef0123456789abcdef01234567", +thumbprintAlgorithm = "sha1", +storeLocation = "localmachine", +storeName = "Root", +visibility = new object[] +{ +"task" +}, +} + }, metadata = new object[] { new @@ -3884,7 +4168,7 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Async() + public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new 
DefaultAzureCredential(); @@ -3959,7 +4243,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ poolLifetimeOption = "jobschedule", pool = new { - vmSize = "Standard_D1_v2", + vmSize = "STANDARD_D2S_V3", virtualMachineConfiguration = new { imageReference = new @@ -3970,6 +4254,14 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ version = "latest", }, nodeAgentSKUId = "batch.node.windows amd64", + windowsConfiguration = new + { + enableAutomaticUpdates = false, + }, + nodePlacementConfiguration = new + { + policy = "zonal", + }, }, resizeTimeout = "PT15M", targetDedicatedNodes = 3, @@ -4011,6 +4303,20 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ maxTaskRetryCount = 2, waitForSuccess = true, }, + certificateReferences = new object[] + { +new +{ +thumbprint = "0123456789abcdef0123456789abcdef01234567", +thumbprintAlgorithm = "sha1", +storeLocation = "localmachine", +storeName = "Root", +visibility = new object[] +{ +"task" +}, +} + }, metadata = new object[] { new @@ -4040,7 +4346,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Convenience() + public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4057,7 +4363,7 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Conven AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("Standard_D1_v2") + Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference { @@ -4065,7 +4371,17 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Conven Offer = "WindowsServer", Sku = "2016-datacenter-smalldisk", Version = "latest", - }, "batch.node.windows amd64"), + }, "batch.node.windows amd64") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = false, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Zonal, + }, + }, TaskSlotsPerNode = 2, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -4095,6 +4411,12 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Conven MaxTaskRetryCount = 2, WaitForSuccess = true, }, + CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") +{ +StoreLocation = BatchCertificateStoreLocation.LocalMachine, +StoreName = "Root", +Visibility = {BatchCertificateVisibility.Task}, +}}, Metadata = { new MetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, @@ -4149,7 +4471,7 @@ public void Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Conven [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Convenience_Async() + public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); 
@@ -4166,7 +4488,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) { AutoPoolIdPrefix = "mypool", - Pool = new BatchPoolSpecification("Standard_D1_v2") + Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference { @@ -4174,7 +4496,17 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ Offer = "WindowsServer", Sku = "2016-datacenter-smalldisk", Version = "latest", - }, "batch.node.windows amd64"), + }, "batch.node.windows amd64") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = false, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Zonal, + }, + }, TaskSlotsPerNode = 2, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), ResizeTimeout = XmlConvert.ToTimeSpan("PT15M"), @@ -4204,6 +4536,12 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ MaxTaskRetryCount = 2, WaitForSuccess = true, }, + CertificateReferences = {new BatchCertificateReference("0123456789abcdef0123456789abcdef01234567", "sha1") +{ +StoreLocation = BatchCertificateStoreLocation.LocalMachine, +StoreName = "Root", +Visibility = {BatchCertificateVisibility.Task}, +}}, Metadata = { new MetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, @@ -4258,7 +4596,7 @@ public async Task Example_Batch_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesABasicTask() + public void Example_BatchClient_CreateTask_CreatesABasicTask() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4276,7 +4614,7 @@ public void Example_Batch_CreateTask_CreatesABasicTask() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesABasicTask_Async() + public async Task Example_BatchClient_CreateTask_CreatesABasicTask_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4294,7 +4632,7 @@ public async Task Example_Batch_CreateTask_CreatesABasicTask_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesABasicTask_Convenience() + public void Example_BatchClient_CreateTask_CreatesABasicTask_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4306,7 +4644,7 @@ public void Example_Batch_CreateTask_CreatesABasicTask_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesABasicTask_Convenience_Async() + public async Task Example_BatchClient_CreateTask_CreatesABasicTask_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4318,7 +4656,7 @@ public async Task Example_Batch_CreateTask_CreatesABasicTask_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithContainerSettings() + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettings() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4349,7 +4687,7 @@ 
public void Example_Batch_CreateTask_CreatesATaskWithContainerSettings() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Async() + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettings_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4380,7 +4718,7 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Asy [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Convenience() + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettings_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4406,7 +4744,7 @@ public void Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Convenien [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Convenience_Async() + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettings_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4432,7 +4770,7 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithContainerSettings_Con [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithExitConditions() + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDataIsolation() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4441,19 +4779,16 @@ public void Example_Batch_CreateTask_CreatesATaskWithExitConditions() using RequestContent content = RequestContent.Create(new { id = "taskId", - commandLine = "cmd /c exit 3", - exitConditions = new + commandLine = "bash -c 'echo hello'", + containerSettings = new { - exitCodeRanges = new object[] + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] { new { -start = 2, -end = 4, -exitOptions = new -{ -jobAction = "terminate", -}, +source = "Task", +isReadOnly = true, } }, }, @@ -4473,7 +4808,7 @@ public void Example_Batch_CreateTask_CreatesATaskWithExitConditions() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithExitConditions_Async() + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDataIsolation_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4482,19 +4817,16 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithExitConditions_Async( using RequestContent content = RequestContent.Create(new { id = "taskId", - commandLine = "cmd /c exit 3", - exitConditions = new + commandLine = "bash -c 'echo hello'", + containerSettings = new { - exitCodeRanges = new object[] + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] { new { -start = 2, -end = 4, -exitOptions = new -{ -jobAction = "terminate", -}, +source = "Task", +isReadOnly = true, } }, }, @@ -4514,20 +4846,21 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithExitConditions_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithExitConditions_Convenience() + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDataIsolation_Convenience() 
{ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") { - ExitConditions = new ExitConditions + ContainerSettings = new BatchTaskContainerSettings("ubuntu") { - ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry { -JobAction = BatchJobAction.Terminate, -})}, +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}}, }, UserIdentity = new UserIdentity { @@ -4543,20 +4876,21 @@ public void Example_Batch_CreateTask_CreatesATaskWithExitConditions_Convenience( [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithExitConditions_Convenience_Async() + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDataIsolation_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") { - ExitConditions = new ExitConditions + ContainerSettings = new BatchTaskContainerSettings("ubuntu") { - ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry { -JobAction = BatchJobAction.Terminate, -})}, +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}}, }, UserIdentity = new UserIdentity { @@ -4572,7 +4906,7 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithExitConditions_Conven [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement() + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDuplicateSource() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4580,8 +4914,302 @@ public void Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement() using RequestContent content = RequestContent.Create(new { - id = "task1", - requiredSlots = 2, + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { +new +{ +source = "Task", +isReadOnly = true, +}, +new +{ +source = "Task", +isReadOnly = true, +} + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + }); + Response response = client.CreateTask("jobId", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDuplicateSource_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "taskId", + commandLine = "bash -c 'echo hello'", + containerSettings = new + { + imageName = "ubuntu", + containerHostBatchBindMounts = new object[] + { +new +{ +source = "Task", +isReadOnly = true, +}, +new +{ +source = 
"Task", +isReadOnly = true, +} + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + }); + Response response = await client.CreateTaskAsync("jobId", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDuplicateSource_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + { + ContainerSettings = new BatchTaskContainerSettings("ubuntu") + { + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry +{ +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}, new ContainerHostBatchBindMountEntry +{ +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + }; + Response response = client.CreateTask("jobId", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWithDuplicateSource_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + { + ContainerSettings = new BatchTaskContainerSettings("ubuntu") + { + ContainerHostBatchBindMounts = {new ContainerHostBatchBindMountEntry +{ +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}, new ContainerHostBatchBindMountEntry +{ +Source = ContainerHostDataPath.Task, +IsReadOnly = true, +}}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + }; + Response response = await client.CreateTaskAsync("jobId", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreateTask_CreatesATaskWithExitConditions() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "taskId", + commandLine = "cmd /c exit 3", + exitConditions = new + { + exitCodeRanges = new object[] + { +new +{ +start = 2, +end = 4, +exitOptions = new +{ +jobAction = "terminate", +}, +} + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + }); + Response response = client.CreateTask("jobId", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateTask_CreatesATaskWithExitConditions_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "taskId", + commandLine = "cmd /c exit 3", + exitConditions = new + { + exitCodeRanges = new object[] + { +new +{ +start = 2, +end = 4, 
+exitOptions = new +{ +jobAction = "terminate", +}, +} + }, + }, + userIdentity = new + { + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + }); + Response response = await client.CreateTaskAsync("jobId", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreateTask_CreatesATaskWithExitConditions_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + { + ExitConditions = new ExitConditions + { + ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions +{ +JobAction = BatchJobAction.Terminate, +})}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + }; + Response response = client.CreateTask("jobId", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_CreateTask_CreatesATaskWithExitConditions_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + { + ExitConditions = new ExitConditions + { + ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions +{ +JobAction = BatchJobAction.Terminate, +})}, + }, + UserIdentity = new UserIdentity + { + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + }; + Response response = await client.CreateTaskAsync("jobId", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequirement() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "task1", + requiredSlots = 2, commandLine = "cmd /c echo task1", }); Response response = client.CreateTask("jobId", content); @@ -4591,7 +5219,7 @@ public void Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_Async() + public async Task Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequirement_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4610,7 +5238,7 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_ [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_Convenience() + public void Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequirement_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4625,7 +5253,7 @@ public void Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_Conven [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_Convenience_Async() + public 
async Task Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequirement_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4640,7 +5268,7 @@ public async Task Example_Batch_CreateTask_CreatesATaskWithExtraSlotRequirement_ [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks() + public void Example_BatchClient_CreateTaskCollection_CreatesABasicCollectionOfTasks() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4670,7 +5298,7 @@ public void Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks_Async() + public async Task Example_BatchClient_CreateTaskCollection_CreatesABasicCollectionOfTasks_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4700,7 +5328,7 @@ public async Task Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTa [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks_Convenience() + public void Example_BatchClient_CreateTaskCollection_CreatesABasicCollectionOfTasks_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4716,7 +5344,7 @@ public void Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks_Co [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTasks_Convenience_Async() + public async Task Example_BatchClient_CreateTaskCollection_CreatesABasicCollectionOfTasks_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4732,7 +5360,7 @@ public async Task Example_Batch_CreateTaskCollection_CreatesABasicCollectionOfTa [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks() + public void Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOfTasks() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4807,7 +5435,7 @@ public void Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks( [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Async() + public async Task Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4882,7 +5510,7 @@ public async Task Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOf [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Convenience() + public void Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4929,7 +5557,7 @@ public void Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks_ [Test] [Ignore("Only validating compilation of examples")] - public async Task 
Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Convenience_Async() + public async Task Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOfTasks_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4976,7 +5604,7 @@ public async Task Example_Batch_CreateTaskCollection_CreatesAComplexCollectionOf [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteTask_TaskDelete() + public void Example_BatchClient_DeleteTask_TaskDelete() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -4989,7 +5617,7 @@ public void Example_Batch_DeleteTask_TaskDelete() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteTask_TaskDelete_Async() + public async Task Example_BatchClient_DeleteTask_TaskDelete_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5002,7 +5630,7 @@ public async Task Example_Batch_DeleteTask_TaskDelete_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTask_TaskGet() + public void Example_BatchClient_GetTask_TaskGet() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5016,7 +5644,7 @@ public void Example_Batch_GetTask_TaskGet() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTask_TaskGet_Async() + public async Task Example_BatchClient_GetTask_TaskGet_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5030,7 +5658,7 @@ public async Task Example_Batch_GetTask_TaskGet_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTask_TaskGet_Convenience() + public void Example_BatchClient_GetTask_TaskGet_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5041,7 +5669,7 @@ public void Example_Batch_GetTask_TaskGet_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTask_TaskGet_Convenience_Async() + public async Task Example_BatchClient_GetTask_TaskGet_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5052,7 +5680,7 @@ public async Task Example_Batch_GetTask_TaskGet_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceTask_TaskUpdate() + public void Example_BatchClient_ReplaceTask_TaskUpdate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5074,7 +5702,7 @@ public void Example_Batch_ReplaceTask_TaskUpdate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceTask_TaskUpdate_Async() + public async Task Example_BatchClient_ReplaceTask_TaskUpdate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5096,7 +5724,7 @@ public async Task Example_Batch_ReplaceTask_TaskUpdate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceTask_TaskUpdate_Convenience() + public void Example_BatchClient_ReplaceTask_TaskUpdate_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5116,7 +5744,7 @@ public void Example_Batch_ReplaceTask_TaskUpdate_Convenience() 
[Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceTask_TaskUpdate_Convenience_Async() + public async Task Example_BatchClient_ReplaceTask_TaskUpdate_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5136,7 +5764,7 @@ public async Task Example_Batch_ReplaceTask_TaskUpdate_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_TerminateTask_TaskTerminate() + public void Example_BatchClient_TerminateTask_TaskTerminate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5149,7 +5777,7 @@ public void Example_Batch_TerminateTask_TaskTerminate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_TerminateTask_TaskTerminate_Async() + public async Task Example_BatchClient_TerminateTask_TaskTerminate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5162,7 +5790,7 @@ public async Task Example_Batch_TerminateTask_TaskTerminate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReactivateTask_TaskReactivate() + public void Example_BatchClient_ReactivateTask_TaskReactivate() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5175,7 +5803,7 @@ public void Example_Batch_ReactivateTask_TaskReactivate() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReactivateTask_TaskReactivate_Async() + public async Task Example_BatchClient_ReactivateTask_TaskReactivate_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5188,7 +5816,7 @@ public async Task Example_Batch_ReactivateTask_TaskReactivate_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteTaskFile_FileDeleteFromTask() + public void Example_BatchClient_DeleteTaskFile_FileDeleteFromTask() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5201,7 +5829,7 @@ public void Example_Batch_DeleteTaskFile_FileDeleteFromTask() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteTaskFile_FileDeleteFromTask_Async() + public async Task Example_BatchClient_DeleteTaskFile_FileDeleteFromTask_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5214,7 +5842,7 @@ public async Task Example_Batch_DeleteTaskFile_FileDeleteFromTask_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTaskFile_GetFileFromTask() + public void Example_BatchClient_GetTaskFile_GetFileFromTask() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5228,7 +5856,7 @@ public void Example_Batch_GetTaskFile_GetFileFromTask() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTaskFile_GetFileFromTask_Async() + public async Task Example_BatchClient_GetTaskFile_GetFileFromTask_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5242,7 +5870,7 @@ public async Task Example_Batch_GetTaskFile_GetFileFromTask_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTaskFile_GetFileFromTask_Convenience() + public void 
Example_BatchClient_GetTaskFile_GetFileFromTask_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5253,7 +5881,7 @@ public void Example_Batch_GetTaskFile_GetFileFromTask_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTaskFile_GetFileFromTask_Convenience_Async() + public async Task Example_BatchClient_GetTaskFile_GetFileFromTask_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5264,7 +5892,7 @@ public async Task Example_Batch_GetTaskFile_GetFileFromTask_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateNodeUser_NodeCreateUser() + public void Example_BatchClient_CreateNodeUser_NodeCreateUser() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5284,7 +5912,7 @@ public void Example_Batch_CreateNodeUser_NodeCreateUser() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateNodeUser_NodeCreateUser_Async() + public async Task Example_BatchClient_CreateNodeUser_NodeCreateUser_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5304,7 +5932,7 @@ public async Task Example_Batch_CreateNodeUser_NodeCreateUser_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_CreateNodeUser_NodeCreateUser_Convenience() + public void Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5321,7 +5949,7 @@ public void Example_Batch_CreateNodeUser_NodeCreateUser_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_CreateNodeUser_NodeCreateUser_Convenience_Async() + public async Task Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5338,7 +5966,7 @@ public async Task Example_Batch_CreateNodeUser_NodeCreateUser_Convenience_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteNodeUser_NodeDeleteUser() + public void Example_BatchClient_DeleteNodeUser_NodeDeleteUser() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5351,7 +5979,7 @@ public void Example_Batch_DeleteNodeUser_NodeDeleteUser() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteNodeUser_NodeDeleteUser_Async() + public async Task Example_BatchClient_DeleteNodeUser_NodeDeleteUser_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5364,7 +5992,7 @@ public async Task Example_Batch_DeleteNodeUser_NodeDeleteUser_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceNodeUser_NodeUpdateUser() + public void Example_BatchClient_ReplaceNodeUser_NodeUpdateUser() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5382,7 +6010,7 @@ public void Example_Batch_ReplaceNodeUser_NodeUpdateUser() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceNodeUser_NodeUpdateUser_Async() + public async Task Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Async() { Uri endpoint = new 
Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5400,7 +6028,7 @@ public async Task Example_Batch_ReplaceNodeUser_NodeUpdateUser_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_ReplaceNodeUser_NodeUpdateUser_Convenience() + public void Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5416,7 +6044,7 @@ public void Example_Batch_ReplaceNodeUser_NodeUpdateUser_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_ReplaceNodeUser_NodeUpdateUser_Convenience_Async() + public async Task Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5432,7 +6060,7 @@ public async Task Example_Batch_ReplaceNodeUser_NodeUpdateUser_Convenience_Async [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNode_NodeGet() + public void Example_BatchClient_GetNode_NodeGet() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5446,7 +6074,7 @@ public void Example_Batch_GetNode_NodeGet() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNode_NodeGet_Async() + public async Task Example_BatchClient_GetNode_NodeGet_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5460,7 +6088,7 @@ public async Task Example_Batch_GetNode_NodeGet_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNode_NodeGet_Convenience() + public void Example_BatchClient_GetNode_NodeGet_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5471,7 +6099,7 @@ public void Example_Batch_GetNode_NodeGet_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNode_NodeGet_Convenience_Async() + public async Task Example_BatchClient_GetNode_NodeGet_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5482,7 +6110,7 @@ public async Task Example_Batch_GetNode_NodeGet_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_RebootNode_NodeReboot() + public void Example_BatchClient_RebootNode_NodeReboot() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5496,7 +6124,7 @@ public void Example_Batch_RebootNode_NodeReboot() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_RebootNode_NodeReboot_Async() + public async Task Example_BatchClient_RebootNode_NodeReboot_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5510,7 +6138,7 @@ public async Task Example_Batch_RebootNode_NodeReboot_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_RebootNode_NodeReboot_Convenience() + public void Example_BatchClient_RebootNode_NodeReboot_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5521,7 +6149,7 @@ public void Example_Batch_RebootNode_NodeReboot_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task 
Example_Batch_RebootNode_NodeReboot_Convenience_Async() + public async Task Example_BatchClient_RebootNode_NodeReboot_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5532,7 +6160,133 @@ public async Task Example_Batch_RebootNode_NodeReboot_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisableNodeScheduling_NodeDisableScheduling() + public void Example_BatchClient_StartNode_NodeStart() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.StartNode("poolId", "tvm-1695681911_1-20161122t193202z"); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_StartNode_NodeStart_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.StartNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_ReimageNode_NodeReimage() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_ReimageNode_NodeReimage_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_ReimageNode_NodeReimage_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_ReimageNode_NodeReimage_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_DeallocateNode_NodeDeallocate() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task 
Example_BatchClient_DeallocateNode_NodeDeallocate_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_DeallocateNode_NodeDeallocate_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_DeallocateNode_NodeDeallocate_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5546,7 +6300,7 @@ public void Example_Batch_DisableNodeScheduling_NodeDisableScheduling() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Async() + public async Task Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5560,7 +6314,7 @@ public async Task Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Asyn [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Convenience() + public void Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5571,7 +6325,7 @@ public void Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Convenienc [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Convenience_Async() + public async Task Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5582,7 +6336,7 @@ public async Task Example_Batch_DisableNodeScheduling_NodeDisableScheduling_Conv [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_EnableNodeScheduling_NodeEnableScheduling() + public void Example_BatchClient_EnableNodeScheduling_NodeEnableScheduling() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5595,7 +6349,7 @@ public void Example_Batch_EnableNodeScheduling_NodeEnableScheduling() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_EnableNodeScheduling_NodeEnableScheduling_Async() + public async Task Example_BatchClient_EnableNodeScheduling_NodeEnableScheduling_Async() { Uri endpoint = new Uri(""); TokenCredential credential = 
new DefaultAzureCredential(); @@ -5608,7 +6362,7 @@ public async Task Example_Batch_EnableNodeScheduling_NodeEnableScheduling_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings() + public void Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5623,7 +6377,7 @@ public void Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings( [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Async() + public async Task Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5638,7 +6392,7 @@ public async Task Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSet [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Convenience() + public void Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5649,7 +6403,7 @@ public void Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_ [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Convenience_Async() + public async Task Example_BatchClient_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSettings_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5660,7 +6414,7 @@ public async Task Example_Batch_GetNodeRemoteLoginSettings_NodeGetRemoteLoginSet [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_UploadNodeLogs_UploadBatchServiceLogs() + public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5680,7 +6434,7 @@ public void Example_Batch_UploadNodeLogs_UploadBatchServiceLogs() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Async() + public async Task Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5700,7 +6454,7 @@ public async Task Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Convenience() + public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5712,7 +6466,7 @@ public void Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Convenience_Async() + public async Task Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5724,7 +6478,7 @@ public async Task 
Example_Batch_UploadNodeLogs_UploadBatchServiceLogs_Convenienc [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeExtension_GetBatchNodeExtension() + public void Example_BatchClient_GetNodeExtension_GetBatchNodeExtension() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5738,7 +6492,7 @@ public void Example_Batch_GetNodeExtension_GetBatchNodeExtension() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeExtension_GetBatchNodeExtension_Async() + public async Task Example_BatchClient_GetNodeExtension_GetBatchNodeExtension_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5752,7 +6506,7 @@ public async Task Example_Batch_GetNodeExtension_GetBatchNodeExtension_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeExtension_GetBatchNodeExtension_Convenience() + public void Example_BatchClient_GetNodeExtension_GetBatchNodeExtension_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5763,7 +6517,7 @@ public void Example_Batch_GetNodeExtension_GetBatchNodeExtension_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeExtension_GetBatchNodeExtension_Convenience_Async() + public async Task Example_BatchClient_GetNodeExtension_GetBatchNodeExtension_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5774,7 +6528,7 @@ public async Task Example_Batch_GetNodeExtension_GetBatchNodeExtension_Convenien [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_DeleteNodeFile_FileDeleteFromNode() + public void Example_BatchClient_DeleteNodeFile_FileDeleteFromNode() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5787,7 +6541,7 @@ public void Example_Batch_DeleteNodeFile_FileDeleteFromNode() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_DeleteNodeFile_FileDeleteFromNode_Async() + public async Task Example_BatchClient_DeleteNodeFile_FileDeleteFromNode_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5800,7 +6554,7 @@ public async Task Example_Batch_DeleteNodeFile_FileDeleteFromNode_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeFile_GetFileFromComputeNode() + public void Example_BatchClient_GetNodeFile_GetFileFromComputeNode() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5814,7 +6568,7 @@ public void Example_Batch_GetNodeFile_GetFileFromComputeNode() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeFile_GetFileFromComputeNode_Async() + public async Task Example_BatchClient_GetNodeFile_GetFileFromComputeNode_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5828,7 +6582,7 @@ public async Task Example_Batch_GetNodeFile_GetFileFromComputeNode_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeFile_GetFileFromComputeNode_Convenience() + public void Example_BatchClient_GetNodeFile_GetFileFromComputeNode_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new 
DefaultAzureCredential(); @@ -5839,7 +6593,7 @@ public void Example_Batch_GetNodeFile_GetFileFromComputeNode_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeFile_GetFileFromComputeNode_Convenience_Async() + public async Task Example_BatchClient_GetNodeFile_GetFileFromComputeNode_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5850,7 +6604,7 @@ public async Task Example_Batch_GetNodeFile_GetFileFromComputeNode_Convenience_A [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetApplications_ListApplications() + public void Example_BatchClient_GetApplications_ListApplications() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5867,7 +6621,7 @@ public void Example_Batch_GetApplications_ListApplications() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetApplications_ListApplications_Async() + public async Task Example_BatchClient_GetApplications_ListApplications_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5884,7 +6638,7 @@ public async Task Example_Batch_GetApplications_ListApplications_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetApplications_ListApplications_Convenience() + public void Example_BatchClient_GetApplications_ListApplications_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5897,7 +6651,7 @@ public void Example_Batch_GetApplications_ListApplications_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetApplications_ListApplications_Convenience_Async() + public async Task Example_BatchClient_GetApplications_ListApplications_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5910,7 +6664,7 @@ public async Task Example_Batch_GetApplications_ListApplications_Convenience_Asy [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics() + public void Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5929,7 +6683,7 @@ public void Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Async() + public async Task Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5948,7 +6702,7 @@ public async Task Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Convenience() + public void Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5961,7 +6715,7 @@ public void Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Convenience_Async() + public async 
Task Example_BatchClient_GetPoolUsageMetrics_PoolListUsageMetrics_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5974,7 +6728,7 @@ public async Task Example_Batch_GetPoolUsageMetrics_PoolListUsageMetrics_Conveni [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPools_PoolList() + public void Example_BatchClient_GetPools_PoolList() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -5989,7 +6743,7 @@ public void Example_Batch_GetPools_PoolList() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPools_PoolList_Async() + public async Task Example_BatchClient_GetPools_PoolList_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6004,7 +6758,7 @@ public async Task Example_Batch_GetPools_PoolList_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPools_PoolList_Convenience() + public void Example_BatchClient_GetPools_PoolList_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6017,7 +6771,7 @@ public void Example_Batch_GetPools_PoolList_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPools_PoolList_Convenience_Async() + public async Task Example_BatchClient_GetPools_PoolList_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6030,7 +6784,7 @@ public async Task Example_Batch_GetPools_PoolList_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetSupportedImages_AccountListNodeAgentSkus() + public void Example_BatchClient_GetSupportedImages_AccountListNodeAgentSkus() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6048,7 +6802,7 @@ public void Example_Batch_GetSupportedImages_AccountListNodeAgentSkus() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Async() + public async Task Example_BatchClient_GetSupportedImages_AccountListNodeAgentSkus_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6066,7 +6820,7 @@ public async Task Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Asyn [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Convenience() + public void Example_BatchClient_GetSupportedImages_AccountListNodeAgentSkus_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6079,7 +6833,7 @@ public void Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Convenienc [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Convenience_Async() + public async Task Example_BatchClient_GetSupportedImages_AccountListNodeAgentSkus_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6092,7 +6846,7 @@ public async Task Example_Batch_GetSupportedImages_AccountListNodeAgentSkus_Conv [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPoolNodeCounts_NodeCountsPayload() + public void 
Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6107,7 +6861,7 @@ public void Example_Batch_GetPoolNodeCounts_NodeCountsPayload() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Async() + public async Task Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6122,7 +6876,7 @@ public async Task Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Convenience() + public void Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6135,7 +6889,7 @@ public void Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Convenience_Async() + public async Task Example_BatchClient_GetPoolNodeCounts_NodeCountsPayload_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6148,7 +6902,7 @@ public async Task Example_Batch_GetPoolNodeCounts_NodeCountsPayload_Convenience_ [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobs_JobList() + public void Example_BatchClient_GetJobs_JobList() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6163,7 +6917,7 @@ public void Example_Batch_GetJobs_JobList() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobs_JobList_Async() + public async Task Example_BatchClient_GetJobs_JobList_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6178,7 +6932,7 @@ public async Task Example_Batch_GetJobs_JobList_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobs_JobList_Convenience() + public void Example_BatchClient_GetJobs_JobList_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6191,7 +6945,7 @@ public void Example_Batch_GetJobs_JobList_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobs_JobList_Convenience_Async() + public async Task Example_BatchClient_GetJobs_JobList_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6204,7 +6958,7 @@ public async Task Example_Batch_GetJobs_JobList_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule() + public void Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedule() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6219,7 +6973,7 @@ public void Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Async() + public async Task Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedule_Async() { Uri endpoint = new Uri(""); 
TokenCredential credential = new DefaultAzureCredential(); @@ -6234,7 +6988,7 @@ public async Task Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Asy [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Convenience() + public void Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedule_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6247,7 +7001,7 @@ public void Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Convenien [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Convenience_Async() + public async Task Example_BatchClient_GetJobsFromSchedules_ListJobUnderJobSchedule_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6260,7 +7014,7 @@ public async Task Example_Batch_GetJobsFromSchedules_ListJobUnderJobSchedule_Con [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus() + public void Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6275,7 +7029,7 @@ public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPrepara [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Async() + public async Task Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6290,7 +7044,7 @@ public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListP [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Convenience() + public void Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6303,7 +7057,7 @@ public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPrepara [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Convenience_Async() + public async Task Example_BatchClient_GetJobPreparationAndReleaseTaskStatuses_JobListPreparationAndReleaseTaskStatus_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6316,7 +7070,67 @@ public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_JobListP [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobSchedules_JobScheduleList() + public void Example_BatchClient_GetCertificates_CertificateList() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetCertificates(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + { + JsonElement 
result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + Console.WriteLine(result.GetProperty("data").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetCertificates_CertificateList_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetCertificatesAsync(null, DateTimeOffset.Parse("Fri, 17 Feb 2017 00:00:00 GMT"), null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("thumbprint").ToString()); + Console.WriteLine(result.GetProperty("thumbprintAlgorithm").ToString()); + Console.WriteLine(result.GetProperty("data").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetCertificates_CertificateList_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchCertificate item in client.GetCertificates()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_BatchClient_GetCertificates_CertificateList_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchCertificate item in client.GetCertificatesAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_BatchClient_GetJobSchedules_JobScheduleList() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6331,7 +7145,7 @@ public void Example_Batch_GetJobSchedules_JobScheduleList() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobSchedules_JobScheduleList_Async() + public async Task Example_BatchClient_GetJobSchedules_JobScheduleList_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6346,7 +7160,7 @@ public async Task Example_Batch_GetJobSchedules_JobScheduleList_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetJobSchedules_JobScheduleList_Convenience() + public void Example_BatchClient_GetJobSchedules_JobScheduleList_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6359,7 +7173,7 @@ public void Example_Batch_GetJobSchedules_JobScheduleList_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetJobSchedules_JobScheduleList_Convenience_Async() + public async Task Example_BatchClient_GetJobSchedules_JobScheduleList_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6372,7 +7186,7 @@ public async Task Example_Batch_GetJobSchedules_JobScheduleList_Convenience_Asyn [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTasks_TaskList() + public void Example_BatchClient_GetTasks_TaskList() { Uri endpoint = new Uri(""); TokenCredential credential = new 
DefaultAzureCredential(); @@ -6387,7 +7201,7 @@ public void Example_Batch_GetTasks_TaskList() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTasks_TaskList_Async() + public async Task Example_BatchClient_GetTasks_TaskList_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6402,7 +7216,7 @@ public async Task Example_Batch_GetTasks_TaskList_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTasks_TaskList_Convenience() + public void Example_BatchClient_GetTasks_TaskList_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6415,7 +7229,7 @@ public void Example_Batch_GetTasks_TaskList_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTasks_TaskList_Convenience_Async() + public async Task Example_BatchClient_GetTasks_TaskList_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6428,7 +7242,7 @@ public async Task Example_Batch_GetTasks_TaskList_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetSubTasks_TaskListSubtasks() + public void Example_BatchClient_GetSubTasks_TaskListSubtasks() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6443,7 +7257,7 @@ public void Example_Batch_GetSubTasks_TaskListSubtasks() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetSubTasks_TaskListSubtasks_Async() + public async Task Example_BatchClient_GetSubTasks_TaskListSubtasks_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6458,7 +7272,7 @@ public async Task Example_Batch_GetSubTasks_TaskListSubtasks_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetSubTasks_TaskListSubtasks_Convenience() + public void Example_BatchClient_GetSubTasks_TaskListSubtasks_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6471,7 +7285,7 @@ public void Example_Batch_GetSubTasks_TaskListSubtasks_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetSubTasks_TaskListSubtasks_Convenience_Async() + public async Task Example_BatchClient_GetSubTasks_TaskListSubtasks_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6484,7 +7298,7 @@ public async Task Example_Batch_GetSubTasks_TaskListSubtasks_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetTaskFiles_FileListFromTask() + public void Example_BatchClient_GetTaskFiles_FileListFromTask() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6499,7 +7313,7 @@ public void Example_Batch_GetTaskFiles_FileListFromTask() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTaskFiles_FileListFromTask_Async() + public async Task Example_BatchClient_GetTaskFiles_FileListFromTask_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6514,7 +7328,7 @@ public async Task Example_Batch_GetTaskFiles_FileListFromTask_Async() [Test] [Ignore("Only validating compilation of examples")] - public 
void Example_Batch_GetTaskFiles_FileListFromTask_Convenience() + public void Example_BatchClient_GetTaskFiles_FileListFromTask_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6527,7 +7341,7 @@ public void Example_Batch_GetTaskFiles_FileListFromTask_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetTaskFiles_FileListFromTask_Convenience_Async() + public async Task Example_BatchClient_GetTaskFiles_FileListFromTask_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6540,7 +7354,7 @@ public async Task Example_Batch_GetTaskFiles_FileListFromTask_Convenience_Async( [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodes_NodeList() + public void Example_BatchClient_GetNodes_NodeList() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6555,7 +7369,7 @@ public void Example_Batch_GetNodes_NodeList() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodes_NodeList_Async() + public async Task Example_BatchClient_GetNodes_NodeList_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6570,7 +7384,7 @@ public async Task Example_Batch_GetNodes_NodeList_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodes_NodeList_Convenience() + public void Example_BatchClient_GetNodes_NodeList_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6583,7 +7397,7 @@ public void Example_Batch_GetNodes_NodeList_Convenience() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodes_NodeList_Convenience_Async() + public async Task Example_BatchClient_GetNodes_NodeList_Convenience_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6596,7 +7410,7 @@ public async Task Example_Batch_GetNodes_NodeList_Convenience_Async() [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeExtensions_ListComputeNodeExtensions() + public void Example_BatchClient_GetNodeExtensions_ListComputeNodeExtensions() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6611,7 +7425,7 @@ public void Example_Batch_GetNodeExtensions_ListComputeNodeExtensions() [Test] [Ignore("Only validating compilation of examples")] - public async Task Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Async() + public async Task Example_BatchClient_GetNodeExtensions_ListComputeNodeExtensions_Async() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6626,7 +7440,7 @@ public async Task Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Asyn [Test] [Ignore("Only validating compilation of examples")] - public void Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Convenience() + public void Example_BatchClient_GetNodeExtensions_ListComputeNodeExtensions_Convenience() { Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); @@ -6639,7 +7453,7 @@ public void Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Convenienc [Test] [Ignore("Only validating compilation of examples")] - public async Task 
Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Convenience_Async()
+        public async Task Example_BatchClient_GetNodeExtensions_ListComputeNodeExtensions_Convenience_Async()
         {
             Uri endpoint = new Uri("");
             TokenCredential credential = new DefaultAzureCredential();
@@ -6652,7 +7466,7 @@ public async Task Example_Batch_GetNodeExtensions_ListComputeNodeExtensions_Conv
         [Test]
         [Ignore("Only validating compilation of examples")]
-        public void Example_Batch_GetNodeFiles_FileListFromNode()
+        public void Example_BatchClient_GetNodeFiles_FileListFromNode()
         {
             Uri endpoint = new Uri("");
             TokenCredential credential = new DefaultAzureCredential();
@@ -6667,7 +7481,7 @@ public void Example_Batch_GetNodeFiles_FileListFromNode()
         [Test]
         [Ignore("Only validating compilation of examples")]
-        public async Task Example_Batch_GetNodeFiles_FileListFromNode_Async()
+        public async Task Example_BatchClient_GetNodeFiles_FileListFromNode_Async()
         {
             Uri endpoint = new Uri("");
             TokenCredential credential = new DefaultAzureCredential();
@@ -6682,7 +7496,7 @@ public async Task Example_Batch_GetNodeFiles_FileListFromNode_Async()
         [Test]
         [Ignore("Only validating compilation of examples")]
-        public void Example_Batch_GetNodeFiles_FileListFromNode_Convenience()
+        public void Example_BatchClient_GetNodeFiles_FileListFromNode_Convenience()
         {
             Uri endpoint = new Uri("");
             TokenCredential credential = new DefaultAzureCredential();
@@ -6695,7 +7509,7 @@ public void Example_Batch_GetNodeFiles_FileListFromNode_Convenience()
         [Test]
         [Ignore("Only validating compilation of examples")]
-        public async Task Example_Batch_GetNodeFiles_FileListFromNode_Convenience_Async()
+        public async Task Example_BatchClient_GetNodeFiles_FileListFromNode_Convenience_Async()
         {
             Uri endpoint = new Uri("");
             TokenCredential credential = new DefaultAzureCredential();
diff --git a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml
index 9daec1a1883a..2e56ef96de99 100644
--- a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml
+++ b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml
@@ -1,3 +1,4 @@
 directory: specification/batch/Azure.Batch
-commit: 5e579fc4b4b16fd902a0cd9d4032fef430f3a859
+commit: 1e2f83cabed31f97d2422b0ac9d3b6d906994141
 repo: Azure/azure-rest-api-specs
+additionalDirectories: